-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
*
- * Lustre Lite I/O page cache routines shared by different kernel revs
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
*/
-#ifndef AUTOCONF_INCLUDED
-#include <linux/config.h>
-#endif
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/rw.c
+ *
+ * Lustre Lite I/O page cache routines shared by different kernel revs
+ */
+
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
#include <linux/unistd.h>
-#include <linux/version.h>
-#include <asm/system.h>
+#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
-#ifdef HAVE_SEGMENT_H
-# include <asm/segment.h>
-#endif
#include <linux/mm.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
+/* current_is_kswapd() */
+#include <linux/swap.h>
#define DEBUG_SUBSYSTEM S_LLITE
-//#include <lustre_mdc.h>
#include <lustre_lite.h>
+#include <obd_cksum.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
-#ifndef list_for_each_prev_safe
-#define list_for_each_prev_safe(pos, n, head) \
- for (pos = (head)->prev, n = pos->prev; pos != (head); \
- pos = n, n = pos->prev )
-#endif
-
-cfs_mem_cache_t *ll_async_page_slab = NULL;
-size_t ll_async_page_slab_size = 0;
-
-/* SYNCHRONOUS I/O to object storage for an inode */
-static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
- struct page *page, int flags)
+/**
+ * Finalizes cl-data before exiting a typical address_space operation. Dual to
+ * ll_cl_init().
+ */
+static void ll_cl_fini(struct ll_cl_context *lcc)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct obd_info oinfo = { { { 0 } } };
- struct brw_page pg;
- int opc, rc;
- ENTRY;
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
- pg.pg = page;
- pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;
+ LASSERT(lcc->lcc_cookie == current);
+ LASSERT(env != NULL);
- if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
- pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
- else
- pg.count = CFS_PAGE_SIZE;
-
- LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
- cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
- inode->i_ino, pg.off, pg.off);
- if (pg.count == 0) {
- CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
- LPU64"\n", inode->i_ino, inode, i_size_read(inode),
- page->mapping->host, i_size_read(page->mapping->host),
- page->index, pg.off);
+ if (page != NULL) {
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
}
- pg.flag = flags;
-
- if (cmd & OBD_BRW_WRITE)
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
- pg.count);
- else
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
- pg.count);
- oinfo.oi_oa = oa;
- oinfo.oi_md = lsm;
- /* NB partial write, so we might not have CAPA_OPC_OSS_READ capa */
- opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
- oinfo.oi_capa = ll_osscapa_get(inode, opc);
- rc = obd_brw(cmd, ll_i2dtexp(inode), &oinfo, 1, &pg, NULL);
- capa_put(oinfo.oi_capa);
- if (rc == 0)
- obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
- else if (rc != -EIO)
- CERROR("error from obd_brw: rc = %d\n", rc);
- RETURN(rc);
+ if (io && lcc->lcc_created) {
+ cl_io_end(env, io);
+ cl_io_unlock(env, io);
+ cl_io_iter_fini(env, io);
+ cl_io_fini(env, io);
+ }
+ cl_env_put(env, &lcc->lcc_refcheck);
}
-/* this isn't where truncate starts. roughly:
- * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
- * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
- * avoid races.
- *
- * must be called under ->lli_size_sem */
-void ll_truncate(struct inode *inode)
+/**
+ * Initializes common cl-data at the typical address_space operation entry
+ * point.
+ */
+static struct ll_cl_context *ll_cl_init(struct file *file,
+ struct page *vmpage, int create)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_info oinfo = { { { 0 } } };
- struct ost_lvb lvb;
- struct obdo oa;
- int rc;
- ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
- inode->i_generation, inode, i_size_read(inode),
- i_size_read(inode));
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
- if (lli->lli_size_sem_owner != current) {
- EXIT;
- return;
- }
-
- if (!lli->lli_smd) {
- CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
- inode->i_ino);
- GOTO(out_unlock, 0);
- }
-
- LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
-
- /* XXX I'm pretty sure this is a hack to paper over a more fundamental
- * race condition. */
- lov_stripe_lock(lli->lli_smd);
- inode_init_lvb(inode, &lvb);
- rc = obd_merge_lvb(ll_i2dtexp(inode), lli->lli_smd, &lvb, 0);
- if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
- CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
- lli->lli_smd->lsm_object_id, i_size_read(inode),
- i_size_read(inode));
- lov_stripe_unlock(lli->lli_smd);
- GOTO(out_unlock, 0);
- }
-
- obd_adjust_kms(ll_i2dtexp(inode), lli->lli_smd, i_size_read(inode), 1);
- lov_stripe_unlock(lli->lli_smd);
-
- if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
- (i_size_read(inode) & ~CFS_PAGE_MASK))) {
- /* If the truncate leaves behind a partial page, update its
- * checksum. */
- struct page *page = find_get_page(inode->i_mapping,
- i_size_read(inode) >>
- CFS_PAGE_SHIFT);
- if (page != NULL) {
- struct ll_async_page *llap = llap_cast_private(page);
- if (llap != NULL) {
- char *kaddr = kmap_atomic(page, KM_USER0);
- llap->llap_checksum =
- init_checksum(OSC_DEFAULT_CKSUM);
- llap->llap_checksum =
- compute_checksum(llap->llap_checksum,
- kaddr, CFS_PAGE_SIZE,
- OSC_DEFAULT_CKSUM);
- kunmap_atomic(kaddr, KM_USER0);
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_object *clob;
+ struct ccc_io *cio;
+
+ int refcheck;
+ int result = 0;
+
+ clob = ll_i2info(vmpage->mapping->host)->lli_clob;
+ LASSERT(clob != NULL);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return ERR_PTR(PTR_ERR(env));
+
+ lcc = &vvp_env_info(env)->vti_io_ctx;
+ memset(lcc, 0, sizeof(*lcc));
+ lcc->lcc_env = env;
+ lcc->lcc_refcheck = refcheck;
+ lcc->lcc_cookie = current;
+
+ cio = ccc_env_io(env);
+ io = cio->cui_cl.cis_io;
+ if (io == NULL && create) {
+ struct inode *inode = vmpage->mapping->host;
+ loff_t pos;
+
+ if (mutex_trylock(&inode->i_mutex)) {
+ mutex_unlock(&(inode)->i_mutex);
+
+ /* This is too bad. Someone is trying to write the
+ * page w/o holding the inode mutex, which means we could
+ * add dirty pages into the cache during truncate */
+ CERROR("Proc %s is dirtying page w/o inode lock, this "
+ "will break truncate.\n", cfs_current()->comm);
+ libcfs_debug_dumpstack(NULL);
+ LBUG();
+ return ERR_PTR(-EIO);
+ }
+
+ /*
+ * The loop-back driver calls the ->prepare_write() and ->sendfile()
+ * methods directly, bypassing the file system's ->write() operation,
+ * so cl_io has to be created here.
+ */
+ io = ccc_env_thread_io(env);
+ ll_io_init(io, file, 1);
+
+ /* No lock at all for this kind of IO - we can't take locks
+ * because we already hold the page lock, and doing so would
+ * cause a deadlock.
+ * XXX: This causes poor performance for the loop device - one
+ * page per RPC.
+ * To get better performance, users should use the lloop driver
+ * instead.
+ */
+ io->ci_lockreq = CILR_NEVER;
+
+ /* cast before shifting to avoid 32-bit pgoff_t overflow */
+ pos = (loff_t)vmpage->index << CFS_PAGE_SHIFT;
+
+ /* Create a temp IO to serve write. */
+ result = cl_io_rw_init(env, io, CIT_WRITE, pos, CFS_PAGE_SIZE);
+ if (result == 0) {
+ cio->cui_fd = LUSTRE_FPRIVATE(file);
+ cio->cui_iov = NULL;
+ cio->cui_nrsegs = 0;
+ result = cl_io_iter_init(env, io);
+ if (result == 0) {
+ result = cl_io_lock(env, io);
+ if (result == 0)
+ result = cl_io_start(env, io);
}
- page_cache_release(page);
- }
- }
-
- CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
- lli->lli_smd->lsm_object_id, i_size_read(inode), i_size_read(inode));
-
- oinfo.oi_md = lli->lli_smd;
- oinfo.oi_policy.l_extent.start = i_size_read(inode);
- oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
- oinfo.oi_oa = &oa;
- oa.o_id = lli->lli_smd->lsm_object_id;
- oa.o_gr = lli->lli_smd->lsm_object_gr;
- oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
-
- obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
- OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLFID | OBD_MD_FLGENER);
-
- ll_inode_size_unlock(inode, 0);
-
- oinfo.oi_capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
- rc = obd_punch_rqset(ll_i2dtexp(inode), &oinfo, NULL);
- ll_truncate_free_capa(oinfo.oi_capa);
- if (rc)
- CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
- else
- obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
- OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
- EXIT;
- return;
-
- out_unlock:
- ll_inode_size_unlock(inode, 0);
-} /* ll_truncate */
-
-int ll_prepare_write(struct file *file, struct page *page, unsigned from,
- unsigned to)
-{
- struct inode *inode = page->mapping->host;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
- struct obd_info oinfo = { { { 0 } } };
- struct brw_page pga;
- struct obdo oa;
- struct ost_lvb lvb;
- int rc = 0;
- ENTRY;
-
- LASSERT(PageLocked(page));
- (void)llap_cast_private(page); /* assertion */
-
- /* Check to see if we should return -EIO right away */
- pga.pg = page;
- pga.off = offset;
- pga.count = CFS_PAGE_SIZE;
- pga.flag = 0;
-
- oa.o_mode = inode->i_mode;
- oa.o_id = lsm->lsm_object_id;
- oa.o_gr = lsm->lsm_object_gr;
- oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
- OBD_MD_FLTYPE | OBD_MD_FLGROUP;
- obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);
-
- oinfo.oi_oa = &oa;
- oinfo.oi_md = lsm;
- rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oinfo, 1, &pga, NULL);
- if (rc)
- RETURN(rc);
-
- if (PageUptodate(page)) {
- LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
- RETURN(0);
- }
-
- /* We're completely overwriting an existing page, so _don't_ set it up
- * to date until commit_write */
- if (from == 0 && to == CFS_PAGE_SIZE) {
- LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
- POISON_PAGE(page, 0x11);
- RETURN(0);
+ } else
+ result = io->ci_result;
+ lcc->lcc_created = 1;
}
- /* If are writing to a new page, no need to read old data. The extent
- * locking will have updated the KMS, and for our purposes here we can
- * treat it like i_size. */
- lov_stripe_lock(lsm);
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
- lov_stripe_unlock(lsm);
- if (lvb.lvb_size <= offset) {
- char *kaddr = kmap_atomic(page, KM_USER0);
- LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
- lvb.lvb_size, offset);
- memset(kaddr, 0, CFS_PAGE_SIZE);
- kunmap_atomic(kaddr, KM_USER0);
- GOTO(prepare_done, rc = 0);
+ lcc->lcc_io = io;
+ if (io == NULL)
+ result = -EIO;
+ if (result == 0) {
+ struct cl_page *page;
+
+ LASSERT(io != NULL);
+ LASSERT(io->ci_state == CIS_IO_GOING);
+ LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
+ page = cl_page_find(env, clob, vmpage->index, vmpage,
+ CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ lcc->lcc_page = page;
+ lu_ref_add(&page->cp_reference, "cl_io", io);
+ result = 0;
+ } else
+ result = PTR_ERR(page);
}
-
- /* XXX could be an async ocp read.. read-ahead? */
- rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
- if (rc == 0) {
- /* bug 1598: don't clobber blksize */
- oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
- obdo_refresh_inode(inode, &oa, oa.o_valid);
+ if (result) {
+ ll_cl_fini(lcc);
+ lcc = ERR_PTR(result);
}
- EXIT;
- prepare_done:
- if (rc == 0)
- SetPageUptodate(page);
-
- return rc;
+ CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
+ vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
+ env, io);
+ return lcc;
}
-static int ll_ap_make_ready(void *data, int cmd)
+static struct ll_cl_context *ll_cl_get(void)
{
- struct ll_async_page *llap;
- struct page *page;
- ENTRY;
-
- llap = LLAP_FROM_COOKIE(data);
- page = llap->llap_page;
-
- LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n", cmd, page,
- page->mapping->host->i_ino, page->index);
-
- /* we're trying to write, but the page is locked.. come back later */
- if (TryLockPage(page))
- RETURN(-EAGAIN);
-
- LASSERT(!PageWriteback(page));
-
- /* if we left PageDirty we might get another writepage call
- * in the future. list walkers are bright enough
- * to check page dirty so we can leave it on whatever list
- * its on. XXX also, we're called with the cli list so if
- * we got the page cache list we'd create a lock inversion
- * with the removepage path which gets the page lock then the
- * cli lock */
- LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
- page->mapping->host->i_ino, page->index);
- clear_page_dirty_for_io(page);
-
- /* This actually clears the dirty bit in the radix tree.*/
- set_page_writeback(page);
-
- LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
- page_cache_get(page);
-
- RETURN(0);
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ LASSERT(!IS_ERR(env));
+ lcc = &vvp_env_info(env)->vti_io_ctx;
+ LASSERT(env == lcc->lcc_env);
+ LASSERT(current == lcc->lcc_cookie);
+ cl_env_put(env, &refcheck);
+
+ /* the env was obtained in ll_cl_init(), so it is still usable. */
+ return lcc;
}
-/* We have two reasons for giving llite the opportunity to change the
- * write length of a given queued page as it builds the RPC containing
- * the page:
- *
- * 1) Further extending writes may have landed in the page cache
- * since a partial write first queued this page requiring us
- * to write more from the page cache. (No further races are possible, since
- * by the time this is called, the page is locked.)
- * 2) We might have raced with truncate and want to avoid performing
- * write RPCs that are just going to be thrown away by the
- * truncate's punch on the storage targets.
- *
- * The kms serves these purposes as it is set at both truncate and extending
- * writes.
+/**
+ * ->prepare_write() address space operation called by generic_file_write()
+ * for every page during write.
*/
-static int ll_ap_refresh_count(void *data, int cmd)
-{
- struct ll_inode_info *lli;
- struct ll_async_page *llap;
- struct lov_stripe_md *lsm;
- struct page *page;
- struct inode *inode;
- struct ost_lvb lvb;
- __u64 kms;
- ENTRY;
-
- /* readpage queues with _COUNT_STABLE, shouldn't get here. */
- LASSERT(cmd != OBD_BRW_READ);
-
- llap = LLAP_FROM_COOKIE(data);
- page = llap->llap_page;
- inode = page->mapping->host;
- lli = ll_i2info(inode);
- lsm = lli->lli_smd;
-
- lov_stripe_lock(lsm);
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
- kms = lvb.lvb_size;
- lov_stripe_unlock(lsm);
-
- /* catch race with truncate */
- if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
- return 0;
-
- /* catch sub-page write at end of file */
- if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
- return kms % CFS_PAGE_SIZE;
-
- return CFS_PAGE_SIZE;
-}
-
-void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
-{
- struct lov_stripe_md *lsm;
- obd_flag valid_flags;
-
- lsm = ll_i2info(inode)->lli_smd;
-
- oa->o_id = lsm->lsm_object_id;
- oa->o_gr = lsm->lsm_object_gr;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
- valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
- if (cmd & OBD_BRW_WRITE) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_easize = ll_i2info(inode)->lli_ioepoch;
-
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID |
- OBD_MD_FLFID | OBD_MD_FLGENER;
- }
-
- obdo_from_inode(oa, inode, valid_flags);
-}
-
-static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
-{
- struct ll_async_page *llap;
- ENTRY;
-
- llap = LLAP_FROM_COOKIE(data);
- ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
-
- EXIT;
-}
-
-static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
- obd_valid valid)
-{
- struct ll_async_page *llap;
- ENTRY;
-
- llap = LLAP_FROM_COOKIE(data);
- obdo_from_inode(oa, llap->llap_page->mapping->host, valid);
-
- EXIT;
-}
-
-static struct obd_capa *ll_ap_lookup_capa(void *data, int cmd)
-{
- struct ll_async_page *llap = LLAP_FROM_COOKIE(data);
- int opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
-
- return ll_osscapa_get(llap->llap_page->mapping->host, opc);
-}
-
-static struct obd_async_page_ops ll_async_page_ops = {
- .ap_make_ready = ll_ap_make_ready,
- .ap_refresh_count = ll_ap_refresh_count,
- .ap_fill_obdo = ll_ap_fill_obdo,
- .ap_update_obdo = ll_ap_update_obdo,
- .ap_completion = ll_ap_completion,
- .ap_lookup_capa = ll_ap_lookup_capa,
-};
-
-struct ll_async_page *llap_cast_private(struct page *page)
-{
- struct ll_async_page *llap = (struct ll_async_page *)page_private(page);
-
- LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
- "page %p private %lu gave magic %d which != %d\n",
- page, page_private(page), llap->llap_magic, LLAP_MAGIC);
-
- return llap;
-}
-
-/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
- *
- * There is an llap attached onto every page in lustre, linked off @sbi.
- * We add an llap to the list so we don't lose our place during list walking.
- * If llaps in the list are being moved they will only move to the end
- * of the LRU, and we aren't terribly interested in those pages here (we
- * start at the beginning of the list where the least-used llaps are.
- */
-int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
-{
- struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
- unsigned long total, want, count = 0;
-
- total = sbi->ll_async_page_count;
-
- /* There can be a large number of llaps (600k or more in a large
- * memory machine) so the VM 1/6 shrink ratio is likely too much.
- * Since we are freeing pages also, we don't necessarily want to
- * shrink so much. Limit to 40MB of pages + llaps per call. */
- if (shrink_fraction == 0)
- want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
- else
- want = (total + shrink_fraction - 1) / shrink_fraction;
-
- if (want > 40 << (20 - CFS_PAGE_SHIFT))
- want = 40 << (20 - CFS_PAGE_SHIFT);
-
- CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
- want, total, shrink_fraction);
-
- spin_lock(&sbi->ll_lock);
- list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
-
- while (--total >= 0 && count < want) {
- struct page *page;
- int keep;
-
- if (unlikely(need_resched())) {
- spin_unlock(&sbi->ll_lock);
- cond_resched();
- spin_lock(&sbi->ll_lock);
- }
-
- llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
- list_del_init(&dummy_llap.llap_pglist_item);
- if (llap == NULL)
- break;
-
- page = llap->llap_page;
- LASSERT(page != NULL);
-
- list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
-
- /* Page needs/undergoing IO */
- if (TryLockPage(page)) {
- LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
- continue;
- }
-
- keep = (llap->llap_write_queued || PageDirty(page) ||
- PageWriteback(page) || (!PageUptodate(page) &&
- llap->llap_origin != LLAP_ORIGIN_READAHEAD));
-
- LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
- keep ? "keep" : "drop",
- llap->llap_write_queued ? "wq " : "",
- PageDirty(page) ? "pd " : "",
- PageUptodate(page) ? "" : "!pu ",
- PageWriteback(page) ? "wb" : "",
- llap->llap_defer_uptodate ? "" : "!du",
- llap_origins[llap->llap_origin]);
-
- /* If page is dirty or undergoing IO don't discard it */
- if (keep) {
- unlock_page(page);
- continue;
- }
-
- page_cache_get(page);
- spin_unlock(&sbi->ll_lock);
-
- if (page->mapping != NULL) {
- ll_teardown_mmaps(page->mapping,
- (__u64)page->index << CFS_PAGE_SHIFT,
- ((__u64)page->index << CFS_PAGE_SHIFT)|
- ~CFS_PAGE_MASK);
- if (!PageDirty(page) && !page_mapped(page)) {
- ll_ra_accounting(llap, page->mapping);
- ll_truncate_complete_page(page);
- ++count;
- } else {
- LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
- " because it is "
- "%s\n",
- PageDirty(page)?
- "dirty":"mapped");
- }
- }
- unlock_page(page);
- page_cache_release(page);
-
- spin_lock(&sbi->ll_lock);
- }
- list_del(&dummy_llap.llap_pglist_item);
- spin_unlock(&sbi->ll_lock);
-
- CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
- count, want, total);
-
- return count;
-}
-
-struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
+int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
+ unsigned to)
{
- struct ll_async_page *llap;
- struct obd_export *exp;
- struct inode *inode = page->mapping->host;
- struct ll_sb_info *sbi;
- int rc;
+ struct ll_cl_context *lcc;
+ int result;
ENTRY;
- if (!inode) {
- static int triggered;
-
- if (!triggered) {
- LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
- "page received\n");
- libcfs_debug_dumpstack(NULL);
- triggered = 1;
+ lcc = ll_cl_init(file, vmpage, 1);
+ if (!IS_ERR(lcc)) {
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
+
+ cl_page_assume(env, io, page);
+ if (cl_io_is_append(io)) {
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = ccc_object_inode(obj);
+ /*
+ * In the VFS file->page write loop, for appending, the
+ * write offset might be reset according to the new
+ * file size before i_mutex is held. So crw_pos must
+ * be reset here. BUG:17711.
+ */
+ io->u.ci_wr.wr.crw_pos = i_size_read(inode);
}
- RETURN(ERR_PTR(-EINVAL));
- }
- sbi = ll_i2sbi(inode);
- LASSERT(ll_async_page_slab);
- LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
-
- llap = llap_cast_private(page);
- if (llap != NULL) {
- /* move to end of LRU list, except when page is just about to
- * die */
- if (origin != LLAP_ORIGIN_REMOVEPAGE) {
- spin_lock(&sbi->ll_lock);
- sbi->ll_pglist_gen++;
- list_del_init(&llap->llap_pglist_item);
- list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
- spin_unlock(&sbi->ll_lock);
- }
- GOTO(out, llap);
- }
-
- exp = ll_i2dtexp(page->mapping->host);
- if (exp == NULL)
- RETURN(ERR_PTR(-EINVAL));
-
- /* limit the number of lustre-cached pages */
- if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
- llap_shrink_cache(sbi, 0);
-
- OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
- ll_async_page_slab_size);
- if (llap == NULL)
- RETURN(ERR_PTR(-ENOMEM));
- llap->llap_magic = LLAP_MAGIC;
- llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
-
- rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
- (obd_off)page->index << CFS_PAGE_SHIFT,
- &ll_async_page_ops, llap, &llap->llap_cookie);
- if (rc) {
- OBD_SLAB_FREE(llap, ll_async_page_slab,
- ll_async_page_slab_size);
- RETURN(ERR_PTR(rc));
- }
-
- CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
- page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
- /* also zeroing the PRIVBITS low order bitflags */
- __set_page_ll_data(page, llap);
- llap->llap_page = page;
- spin_lock(&sbi->ll_lock);
- sbi->ll_pglist_gen++;
- sbi->ll_async_page_count++;
- list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
- INIT_LIST_HEAD(&llap->llap_pending_write);
- spin_unlock(&sbi->ll_lock);
-
- out:
- if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
- __u32 csum;
- char *kaddr = kmap_atomic(page, KM_USER0);
- csum = init_checksum(OSC_DEFAULT_CKSUM);
- csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
- OSC_DEFAULT_CKSUM);
- kunmap_atomic(kaddr, KM_USER0);
- if (origin == LLAP_ORIGIN_READAHEAD ||
- origin == LLAP_ORIGIN_READPAGE) {
- llap->llap_checksum = 0;
- } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
- llap->llap_checksum == 0) {
- llap->llap_checksum = csum;
- CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
- } else if (llap->llap_checksum == csum) {
- /* origin == LLAP_ORIGIN_WRITEPAGE */
- CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
- page, csum);
+ result = cl_io_prepare_write(env, io, page, from, to);
+ if (result == 0) {
+ /*
+ * Add a reference, so that page is not evicted from
+ * the cache until ->commit_write() is called.
+ */
+ cl_page_get(page);
+ lu_ref_add(&page->cp_reference, "prepare_write",
+ cfs_current());
} else {
- /* origin == LLAP_ORIGIN_WRITEPAGE */
- LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
- "%x!\n", llap->llap_checksum, csum);
+ cl_page_unassume(env, io, page);
+ ll_cl_fini(lcc);
}
+ /* returning 0 in prepare assumes commit must be called
+ * afterwards */
+ } else {
+ result = PTR_ERR(lcc);
}
-
- llap->llap_origin = origin;
- RETURN(llap);
+ RETURN(result);
}
-static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
- struct ll_async_page *llap,
- unsigned to, obd_flag async_flags)
-{
- unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
- struct obd_io_group *oig;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
- ENTRY;
-
- /* _make_ready only sees llap once we've unlocked the page */
- llap->llap_write_queued = 1;
- rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
- llap->llap_cookie, OBD_BRW_WRITE | noquot,
- 0, 0, 0, async_flags);
- if (rc == 0) {
- LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
- GOTO(out, 0);
- }
-
- llap->llap_write_queued = 0;
- /* Do not pass llap here as it is sync write. */
- llap_write_pending(inode, NULL);
-
- rc = oig_init(&oig);
- if (rc)
- GOTO(out, rc);
-
- /* make full-page requests if we are not at EOF (bug 4410) */
- if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
- LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
- "sync write before EOF: size_index %lu, to %d\n",
- size_index, to);
- to = CFS_PAGE_SIZE;
- } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
- int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
- LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
- "sync write at EOF: size_index %lu, to %d/%d\n",
- size_index, to, size_to);
- if (to < size_to)
- to = size_to;
- }
-
- /* compare the checksum once before the page leaves llite */
- if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
- llap->llap_checksum != 0)) {
- __u32 csum;
- struct page *page = llap->llap_page;
- char *kaddr = kmap_atomic(page, KM_USER0);
- csum = init_checksum(OSC_DEFAULT_CKSUM);
- csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
- OSC_DEFAULT_CKSUM);
- kunmap_atomic(kaddr, KM_USER0);
- if (llap->llap_checksum == csum) {
- CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
- page, csum);
- } else {
- CERROR("page %p old cksum %x != new cksum %x!\n",
- page, llap->llap_checksum, csum);
- }
- }
-
- rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
- llap->llap_cookie, OBD_BRW_WRITE | noquot,
- 0, to, 0, ASYNC_READY | ASYNC_URGENT |
- ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
- if (rc)
- GOTO(free_oig, rc);
-
- rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
- if (rc)
- GOTO(free_oig, rc);
-
- rc = oig_wait(oig);
-
- if (!rc && async_flags & ASYNC_READY) {
- unlock_page(llap->llap_page);
- if (PageWriteback(llap->llap_page)) {
- end_page_writeback(llap->llap_page);
- }
- }
-
- if (rc == 0 && llap_write_complete(inode, llap))
- ll_queue_done_writing(inode, 0);
-
- LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
-
-free_oig:
- oig_release(oig);
-out:
- RETURN(rc);
-}
-
-/* update our write count to account for i_size increases that may have
- * happened since we've queued the page for io. */
-
-/* be careful not to return success without setting the page Uptodate or
- * the next pass through prepare_write will read in stale data from disk. */
-int ll_commit_write(struct file *file, struct page *page, unsigned from,
+int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
unsigned to)
{
- struct inode *inode = page->mapping->host;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct obd_export *exp;
- struct ll_async_page *llap;
- loff_t size;
- int rc = 0;
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ int result = 0;
ENTRY;
- SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
- LASSERT(inode == file->f_dentry->d_inode);
- LASSERT(PageLocked(page));
-
- CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
- inode, page, from, to, page->index);
+ lcc = ll_cl_get();
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
- llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
- if (IS_ERR(llap))
- RETURN(PTR_ERR(llap));
-
- exp = ll_i2dtexp(inode);
- if (exp == NULL)
- RETURN(-EINVAL);
-
- llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);
+ LASSERT(cl_page_is_owned(page, io));
+ LASSERT(from <= to);
+ if (from != to) /* handle short write case. */
+ result = cl_io_commit_write(env, io, page, from, to);
+ if (cl_page_is_owned(page, io))
+ cl_page_unassume(env, io, page);
/*
- * queue a write for some time in the future the first time we
- * dirty the page.
- *
- * This is different from what other file systems do: they usually
- * just mark page (and some of its buffers) dirty and rely on
- * balance_dirty_pages() to start a write-back. Lustre wants write-back
- * to be started earlier for the following reasons:
- *
- * (1) with a large number of clients we need to limit the amount
- * of cached data on the clients a lot;
- *
- * (2) large compute jobs generally want compute-only then io-only
- * and the IO should complete as quickly as possible;
- *
- * (3) IO is batched up to the RPC size and is async until the
- * client max cache is hit
- * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
- *
+ * Release reference acquired by ll_prepare_write().
*/
- if (!PageDirty(page)) {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);
-
- rc = queue_or_sync_write(exp, inode, llap, to, 0);
- if (rc)
- GOTO(out, rc);
- } else {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
- }
-
- /* put the page in the page cache, from now on ll_removepage is
- * responsible for cleaning up the llap.
- * only set page dirty when it's queued to be write out */
- if (llap->llap_write_queued)
- set_page_dirty(page);
-
-out:
- size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
- ll_inode_size_lock(inode, 0);
- if (rc == 0) {
- lov_stripe_lock(lsm);
- obd_adjust_kms(exp, lsm, size, 0);
- lov_stripe_unlock(lsm);
- if (size > i_size_read(inode))
- i_size_write(inode, size);
- SetPageUptodate(page);
- } else if (size > i_size_read(inode)) {
- /* this page beyond the pales of i_size, so it can't be
- * truncated in ll_p_r_e during lock revoking. we must
- * teardown our book-keeping here. */
- ll_removepage(page);
- }
- ll_inode_size_unlock(inode, 0);
- RETURN(rc);
+ lu_ref_del(&page->cp_reference, "prepare_write", cfs_current());
+ cl_page_put(env, page);
+ ll_cl_fini(lcc);
+ RETURN(result);
}
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
+struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- unsigned long ret;
- ENTRY;
+ __u64 opc;
- spin_lock(&sbi->ll_lock);
- ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
- ra->ra_cur_pages += ret;
- spin_unlock(&sbi->ll_lock);
-
- RETURN(ret);
+ opc = crt == CRT_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
+ return ll_osscapa_get(inode, opc);
}
-static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
-{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- spin_lock(&sbi->ll_lock);
- LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
- ra->ra_cur_pages, len);
- ra->ra_cur_pages -= len;
- spin_unlock(&sbi->ll_lock);
-}
+static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
-/* called for each page in a completed rpc.*/
-int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
+/**
+ * Get readahead pages from the filesystem readahead pool of the client for a
+ * thread.
+ *
+ * \param sbi superblock for filesystem readahead state ll_ra_info
+ * \param ria per-thread readahead state
+ * \param pages number of pages requested for readahead for the thread.
+ *
+ * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
+ * It should work well if ra_max_pages is much greater than the single
+ * file's read-ahead window, and not too many threads are contending for
+ * these readahead pages.
+ *
+ * TODO: There may be a 'global sync problem' if many threads are trying
+ * to get an ra budget that is larger than the remaining readahead pages
+ * and reach here at exactly the same time. They will compute \a ret to
+ * consume the remaining pages, but will fail at atomic_add_return() and
+ * get a zero ra window, although there is still ra space remaining. - Jay */
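+/* A worked illustration of the TODO above (hypothetical numbers):
+ * ra_max_pages = 1000, ra_cur_pages = 500, and two threads asking for
+ * 400 pages each arrive together. Both compute ret = 400; the first
+ * atomic_add_return() yields 900 <= 1000 and keeps its budget, but the
+ * second yields 1300 > 1000, backs out, and returns a zero window even
+ * though 100 readahead pages were still available. */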
+
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
+ struct ra_io_arg *ria,
+ unsigned long pages)
{
- struct ll_async_page *llap;
- struct page *page;
- int ret = 0;
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ long ret;
ENTRY;
- llap = LLAP_FROM_COOKIE(data);
- page = llap->llap_page;
- LASSERT(PageLocked(page));
- LASSERT(CheckWriteback(page,cmd));
-
- LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
-
- if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
- ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
-
- if (rc == 0) {
- if (cmd & OBD_BRW_READ) {
- if (!llap->llap_defer_uptodate)
- SetPageUptodate(page);
- } else {
- llap->llap_write_queued = 0;
- }
- ClearPageError(page);
- } else {
- if (cmd & OBD_BRW_READ) {
- llap->llap_defer_uptodate = 0;
- }
- SetPageError(page);
- if (rc == -ENOSPC)
- set_bit(AS_ENOSPC, &page->mapping->flags);
- else
- set_bit(AS_EIO, &page->mapping->flags);
- }
-
- unlock_page(page);
-
- if (cmd & OBD_BRW_WRITE) {
- /* Only rc == 0, write succeed, then this page could be deleted
- * from the pending_writing list
- */
- if (rc == 0 && llap_write_complete(page->mapping->host, llap))
- ll_queue_done_writing(page->mapping->host, 0);
+ /* If the read-ahead pages left are less than 1M, do not do read-ahead,
+ * otherwise it will form small read RPCs (< 1M), which hurt server
+ * performance a lot. */
+ ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), pages);
+ if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages))
+ GOTO(out, ret = 0);
+
+ /* If the non-strided (ria_pages == 0) readahead window
+ * (ria_start + ret) has grown across an RPC boundary, then trim
+ * readahead size by the amount beyond the RPC so it ends on an
+ * RPC boundary. If the readahead window is already ending on
+ * an RPC boundary (beyond_rpc == 0), or smaller than a full
+ * RPC (beyond_rpc < ret) the readahead size is unchanged.
+ * The (beyond_rpc != 0) check is skipped since the conditional
+ * branch is more expensive than subtracting zero from the result.
+ *
+ * Strided read is left unaligned to avoid small fragments beyond
+ * the RPC boundary from needing an extra read RPC. */
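+ /* A worked example (hypothetical numbers, with PTLRPC_MAX_BRW_PAGES of
+ * 256): ria_start = 100 and ret = 300 give beyond_rpc = 400 % 256 = 144;
+ * since 144 < 300, ret is trimmed to 300 - 144 = 156 and the window ends
+ * at page 100 + 156 - 1 = 255, the last page before an RPC boundary. */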
+ if (ria->ria_pages == 0) {
+ long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
+ if (/* beyond_rpc != 0 && */ beyond_rpc < ret)
+ ret -= beyond_rpc;
}
- if (PageWriteback(page)) {
- end_page_writeback(page);
+ if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+ cfs_atomic_sub(ret, &ra->ra_cur_pages);
+ ret = 0;
}
- page_cache_release(page);
+out:
RETURN(ret);
}
-/* the kernel calls us here when a page is unhashed from the page cache.
- * the page will be locked and the kernel is holding a spinlock, so
- * we need to be careful. we're just tearing down our book-keeping
- * here. */
-void ll_removepage(struct page *page)
-{
- struct inode *inode = page->mapping->host;
- struct obd_export *exp;
- struct ll_async_page *llap;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc;
- ENTRY;
-
- LASSERT(!in_interrupt());
-
- /* sync pages or failed read pages can leave pages in the page
- * cache that don't have our data associated with them anymore */
- if (page_private(page) == 0) {
- EXIT;
- return;
- }
-
- LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
-
- exp = ll_i2dtexp(inode);
- if (exp == NULL) {
- CERROR("page %p ind %lu gave null export\n", page, page->index);
- EXIT;
- return;
- }
-
- llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
- if (IS_ERR(llap)) {
- CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
- page->index, PTR_ERR(llap));
- EXIT;
- return;
- }
-
- if (llap_write_complete(inode, llap))
- ll_queue_done_writing(inode, 0);
-
- rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
- llap->llap_cookie);
- if (rc != 0)
- CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
-
- /* this unconditional free is only safe because the page lock
- * is providing exclusivity to memory pressure/truncate/writeback..*/
- __clear_page_ll_data(page);
-
- spin_lock(&sbi->ll_lock);
- if (!list_empty(&llap->llap_pglist_item))
- list_del_init(&llap->llap_pglist_item);
- sbi->ll_pglist_gen++;
- sbi->ll_async_page_count--;
- spin_unlock(&sbi->ll_lock);
- OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
- EXIT;
-}
-
-static int ll_page_matches(struct page *page, int fd_flags)
-{
- struct lustre_handle match_lockh = {0};
- struct inode *inode = page->mapping->host;
- ldlm_policy_data_t page_extent;
- int flags, matches;
- ENTRY;
-
- if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
- RETURN(1);
-
- page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
- page_extent.l_extent.end =
- page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
- flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
- if (!(fd_flags & LL_FILE_READAHEAD))
- flags |= LDLM_FL_CBPENDING;
- matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
- ll_i2info(inode)->lli_smd, LDLM_EXTENT,
- &page_extent, LCK_PR | LCK_PW, &flags, inode,
- &match_lockh);
- RETURN(matches);
-}
-
-static int ll_issue_page_read(struct obd_export *exp,
- struct ll_async_page *llap,
- struct obd_io_group *oig, int defer)
+void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
- struct page *page = llap->llap_page;
- int rc;
-
- page_cache_get(page);
- llap->llap_defer_uptodate = defer;
- llap->llap_ra_used = 0;
- rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
- NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
- CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
- ASYNC_READY | ASYNC_URGENT);
- if (rc) {
- LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
- page_cache_release(page);
- }
- RETURN(rc);
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ cfs_atomic_sub(len, &ra->ra_cur_pages);
}
-static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
+static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
{
LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
- ra->ra_stats[which]++;
+ lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
-static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
+void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
- struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
-
- spin_lock(&sbi->ll_lock);
- ll_ra_stats_inc_unlocked(ra, which);
- spin_unlock(&sbi->ll_lock);
-}
-
-void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
-{
- if (!llap->llap_defer_uptodate || llap->llap_ra_used)
- return;
-
- ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
+ ll_ra_stats_inc_sbi(sbi, which);
}
#define RAS_CDEBUG(ras) \
CDEBUG(D_READA, \
"lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \
- "csr %lu sf %lu sp %lu sl %lu \n", \
+ "csr %lu sf %lu sp %lu sl %lu \n", \
ras->ras_last_readpage, ras->ras_consecutive_requests, \
ras->ras_consecutive_pages, ras->ras_window_start, \
ras->ras_window_len, ras->ras_next_readahead, \
- ras->ras_requests, ras->ras_request_index, \
+ ras->ras_requests, ras->ras_request_index, \
ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
ras->ras_stride_pages, ras->ras_stride_length)
void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
- struct ll_readahead_state *ras;
+ struct ll_readahead_state *ras;
- ras = ll_ras_get(f);
+ ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
- ras->ras_requests++;
- ras->ras_request_index = 0;
- ras->ras_consecutive_requests++;
- rar->lrr_reader = current;
+ spin_lock(&ras->ras_lock);
+ ras->ras_requests++;
+ ras->ras_request_index = 0;
+ ras->ras_consecutive_requests++;
+ rar->lrr_reader = current;
- list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- spin_unlock(&ras->ras_lock);
+ cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ spin_unlock(&ras->ras_lock);
}
void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
- struct ll_readahead_state *ras;
+ struct ll_readahead_state *ras;
- ras = ll_ras_get(f);
+ ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
- list_del_init(&rar->lrr_linkage);
- spin_unlock(&ras->ras_lock);
+ spin_lock(&ras->ras_lock);
+ cfs_list_del_init(&rar->lrr_linkage);
+ spin_unlock(&ras->ras_lock);
}
static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
struct ll_ra_read *scan;
- list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
if (scan->lrr_reader == current)
return scan;
}
struct ll_ra_read *ll_ra_read_get(struct file *f)
{
- struct ll_readahead_state *ras;
- struct ll_ra_read *bead;
+ struct ll_readahead_state *ras;
+ struct ll_ra_read *bead;
+
+ ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ bead = ll_ra_read_get_locked(ras);
+ spin_unlock(&ras->ras_lock);
+ return bead;
+}
+
+static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, struct cl_page *page,
+ struct page *vmpage)
+{
+ struct ccc_page *cp;
+ int rc;
- ras = ll_ras_get(f);
+ ENTRY;
- spin_lock(&ras->ras_lock);
- bead = ll_ra_read_get_locked(ras);
- spin_unlock(&ras->ras_lock);
- return bead;
+ rc = 0;
+ cl_page_assume(env, io, page);
+ lu_ref_add(&page->cp_reference, "ra", cfs_current());
+ cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
+ if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
+ rc = cl_page_is_under_lock(env, io, page);
+ if (rc == -EBUSY) {
+ cp->cpg_defer_uptodate = 1;
+ cp->cpg_ra_used = 0;
+ cl_page_list_add(queue, page);
+ rc = 1;
+ } else {
+ cl_page_delete(env, page);
+ rc = -ENOLCK;
+ }
+ } else {
+ /* skip completed pages */
+ cl_page_unassume(env, io, page);
+ }
+ lu_ref_del(&page->cp_reference, "ra", cfs_current());
+ cl_page_put(env, page);
+ RETURN(rc);
}
-static int ll_read_ahead_page(struct obd_export *exp, struct obd_io_group *oig,
- int index, struct address_space *mapping)
+/**
+ * Initiates read-ahead of a page with given index.
+ *
+ * \retval +ve: page was added to \a queue.
+ *
+ * \retval -ENOLCK: there is no extent lock for this part of a file, stop
+ * read-ahead.
+ *
+ * \retval -ve, 0: page wasn't added to \a queue for some other reason.
+ */
+static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue,
+ pgoff_t index, struct address_space *mapping)
{
- struct ll_async_page *llap;
- struct page *page;
- unsigned int gfp_mask = 0;
- int rc = 0;
-
+ struct page *vmpage;
+ struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
+ struct cl_page *page;
+ enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
+ unsigned int gfp_mask;
+ int rc = 0;
+ const char *msg = NULL;
+
+ ENTRY;
+
gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
gfp_mask |= __GFP_NOWARN;
#endif
- page = grab_cache_page_nowait_gfp(mapping, index, gfp_mask);
- if (page == NULL) {
- ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
- CDEBUG(D_READA, "g_c_p_n failed\n");
- return 0;
- }
-
- /* Check if page was truncated or reclaimed */
- if (page->mapping != mapping) {
- ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
- CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
- GOTO(unlock_page, rc = 0);
- }
-
- /* we do this first so that we can see the page in the /proc
- * accounting */
- llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
- if (IS_ERR(llap) || llap->llap_defer_uptodate) {
- if (PTR_ERR(llap) == -ENOLCK) {
- ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
- CDEBUG(D_READA | D_PAGE,
- "Adding page to cache failed index "
- "%d\n", index);
- CDEBUG(D_READA, "nolock page\n");
- GOTO(unlock_page, rc = -ENOLCK);
+ vmpage = grab_cache_page_nowait(mapping, index);
+ if (vmpage != NULL) {
+ /* Check if vmpage was truncated or reclaimed */
+ if (vmpage->mapping == mapping) {
+ page = cl_page_find(env, clob, vmpage->index,
+ vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ rc = cl_read_ahead_page(env, io, queue,
+ page, vmpage);
+ if (rc == -ENOLCK) {
+ which = RA_STAT_FAILED_MATCH;
+ msg = "lock match failed";
+ }
+ } else {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "cl_page_find failed";
+ }
+ } else {
+ which = RA_STAT_WRONG_GRAB_PAGE;
+ msg = "g_c_p_n returned invalid page";
}
- CDEBUG(D_READA, "read-ahead page\n");
- GOTO(unlock_page, rc = 0);
- }
-
- /* skip completed pages */
- if (Page_Uptodate(page))
- GOTO(unlock_page, rc = 0);
-
- /* bail out when we hit the end of the lock. */
- rc = ll_issue_page_read(exp, llap, oig, 1);
- if (rc == 0) {
- LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "started read-ahead\n");
- rc = 1;
+ if (rc != 1)
+ unlock_page(vmpage);
+ page_cache_release(vmpage);
} else {
-unlock_page:
- unlock_page(page);
- LL_CDEBUG_PAGE(D_READA | D_PAGE, page, "skipping read-ahead\n");
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "g_c_p_n failed";
}
- page_cache_release(page);
- return rc;
+ if (msg != NULL) {
+ ll_ra_stats_inc(mapping, which);
+ CDEBUG(D_READA, "%s\n", msg);
+ }
+ RETURN(rc);
}
-/* ra_io_arg will be filled in the beginning of ll_readahead with
- * ras_lock, then the following ll_read_ahead_pages will read RA
- * pages according to this arg, all the items in this structure are
- * counted by page index.
- */
-struct ra_io_arg {
- unsigned long ria_start; /* start offset of read-ahead*/
- unsigned long ria_end; /* end offset of read-ahead*/
- /* If stride read pattern is detected, ria_stoff means where
- * stride read is started. Note: for normal read-ahead, the
- * value here is meaningless, and also it will not be accessed*/
- pgoff_t ria_stoff;
- /* ria_length and ria_pages are the length and pages length in the
- * stride I/O mode. And they will also be used to check whether
- * it is stride I/O read-ahead in the read-ahead pages*/
- unsigned long ria_length;
- unsigned long ria_pages;
-};
-
-#define RIA_DEBUG(ria) \
+#define RIA_DEBUG(ria) \
CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
ria->ria_pages)
-#define RAS_INCREASE_STEP (1024 * 1024 >> CFS_PAGE_SHIFT)
+/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
+ * know what the actual RPC size is. If this needs to change, it makes more
+ * sense to tune the i_blkbits value for the file based on the OSTs it is
+ * striped over, rather than having a constant value for all files here. */
+
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - CFS_PAGE_SHIFT)).
+ * Temporarily set RAS_INCREASE_STEP to 1MB. After the 4MB RPC is enabled
+ * by default, this should be adjusted along with max_read_ahead_mb and
+ * max_read_ahead_per_file_mb, otherwise the readahead budget can be used
+ * up quickly, which will affect read performance significantly. See LU-2816 */
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> CFS_PAGE_SHIFT)
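+/* A worked instance (assuming ONE_MB_BRW_SIZE is 1MB and CFS_PAGE_SHIFT is
+ * 12, i.e. 4KB pages): each step grows the read-ahead window by
+ * (1 << 20) >> 12 = 256 pages. */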
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
- return ras->ras_consecutive_stride_requests > 1;
+ return ras->ras_consecutive_stride_requests > 1;
}
-
-/* The function calculates how much pages will be read in
- * [off, off + length], which will be read by stride I/O mode,
- * stride_offset = st_off, stride_lengh = st_len,
+/* The function calculates how much pages will be read in
+ * [off, off + length], in such stride IO area,
+ * stride_offset = st_off, stride_lengh = st_len,
* stride_pages = st_pgs
- */
+ *
+ * |------------------|*****|------------------|*****|------------|*****|....
+ * st_off
+ * |--- st_pgs ---|
+ * |----- st_len -----|
+ *
+ * How many pages it should read in such pattern
+ * |-------------------------------------------------------------|
+ * off
+ * |<------ length ------->|
+ *
+ * = |<----->| + |-------------------------------------| + |---|
+ * start_left st_pgs * i end_left
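+ *
+ * A worked example (hypothetical numbers): st_off = 0, st_len = 16,
+ * st_pgs = 4, off = 2, length = 30. Then start = 2 and end = 32, so
+ * start_left = 4 - 2 = 2 and end_left = 0, and after the do_div() calls
+ * start = 0 and end = 2, giving pg_count = 2 + 4 * (2 - 0 - 1) + 0 = 6:
+ * pages 2-3 of the first stride plus pages 16-19 of the second.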
+ */
static unsigned long
-stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
- unsigned long off, unsigned length)
+stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
+ unsigned long off, unsigned long length)
{
- unsigned long cont_len = st_off > off ? st_off - off : 0;
- unsigned long stride_len = length + off > st_off ?
- length + off + 1 - st_off : 0;
- unsigned long left, pg_count;
+ __u64 start = off > st_off ? off - st_off : 0;
+ __u64 end = off + length > st_off ? off + length - st_off : 0;
+ unsigned long start_left = 0;
+ unsigned long end_left = 0;
+ unsigned long pg_count;
- if (st_len == 0 || length == 0)
+ if (st_len == 0 || length == 0 || end == 0)
return length;
- left = do_div(stride_len, st_len);
- left = min(left, st_pgs);
+ start_left = do_div(start, st_len);
+ if (start_left < st_pgs)
+ start_left = st_pgs - start_left;
+ else
+ start_left = 0;
+
+ end_left = do_div(end, st_len);
+ if (end_left > st_pgs)
+ end_left = st_pgs;
- pg_count = left + stride_len * st_pgs + cont_len;
+ CDEBUG(D_READA, "start "LPU64", end "LPU64" start_left %lu end_left %lu \n",
+ start, end, start_left, end_left);
- LASSERT(pg_count >= left);
+ if (start == end)
+ pg_count = end_left - (st_pgs - start_left);
+ else
+ pg_count = start_left + st_pgs * (end - start - 1) + end_left;
- CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %u"
- "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
+ CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu"
+ "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
return pg_count;
}
static int ria_page_count(struct ra_io_arg *ria)
{
- __u64 length = ria->ria_end >= ria->ria_start ?
+ __u64 length = ria->ria_end >= ria->ria_start ?
ria->ria_end - ria->ria_start + 1 : 0;
- return stride_pg_count(ria->ria_stoff, ria->ria_length,
+ return stride_pg_count(ria->ria_stoff, ria->ria_length,
ria->ria_pages, ria->ria_start,
length);
}
static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
{
/* If ria_length == ria_pages, it means non-stride I/O mode,
- * idx should always inside read-ahead window in this case
+ * idx should always be inside the read-ahead window in this case.
* For stride I/O mode, just check whether the idx is inside
* the ria_pages. */
- return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
- (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages;
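+ /* e.g. with a hypothetical stride of ria_stoff = 0, ria_length = 16,
+ * ria_pages = 4: indices 0-3, 16-19, 32-35, ... are inside the window
+ * and everything else falls in the stride gaps. */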
+ return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
+ (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
+ ria->ria_length < ria->ria_pages);
}
-static int ll_read_ahead_pages(struct obd_export *exp,
- struct obd_io_group *oig,
- struct ra_io_arg *ria,
+static int ll_read_ahead_pages(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *queue,
+ struct ra_io_arg *ria,
unsigned long *reserved_pages,
struct address_space *mapping,
unsigned long *ra_end)
LASSERT(ria != NULL);
RIA_DEBUG(ria);
-
+
stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
*reserved_pages > 0; page_idx++) {
if (ras_inside_ra_window(page_idx, ria)) {
/* If the page is inside the read-ahead window*/
- rc = ll_read_ahead_page(exp, oig, page_idx, mapping);
- if (rc == 1) {
- (*reserved_pages)--;
- count ++;
- } else if (rc == -ENOLCK)
- break;
+ rc = ll_read_ahead_page(env, io, queue,
+ page_idx, mapping);
+ if (rc == 1) {
+ (*reserved_pages)--;
+ count ++;
+ } else if (rc == -ENOLCK)
+ break;
} else if (stride_ria) {
- /* If it is not in the read-ahead window, and it is
+ /* If it is not in the read-ahead window, and it is
* read-ahead mode, then check whether it should skip
- * the stride gap */
- pgoff_t offset;
- /* FIXME: This assertion only is valid when it is for
- * forward read-ahead, it will be fixed when backward
+ * the stride gap */
+ pgoff_t offset;
+ /* FIXME: This assertion only is valid when it is for
+ * forward read-ahead, it will be fixed when backward
* read-ahead is implemented */
- LASSERTF(page_idx > ria->ria_stoff, "since %lu in the"
- " gap of ra window,it should bigger than stride"
- " offset %lu \n", page_idx, ria->ria_stoff);
-
+ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu"
+ "rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx,
+ ria->ria_start, ria->ria_end, ria->ria_stoff,
+ ria->ria_length, ria->ria_pages);
offset = page_idx - ria->ria_stoff;
- offset = offset % (ria->ria_length);
- if (offset > ria->ria_pages) {
- page_idx += ria->ria_length - offset;
- CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
+ offset = offset % (ria->ria_length);
+ if (offset > ria->ria_pages) {
+ page_idx += ria->ria_length - offset;
+ CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
ria->ria_length - offset);
continue;
}
return count;
}
-static int ll_readahead(struct ll_readahead_state *ras,
- struct obd_export *exp, struct address_space *mapping,
- struct obd_io_group *oig, int flags)
+int ll_readahead(const struct lu_env *env, struct cl_io *io,
+ struct ll_readahead_state *ras, struct address_space *mapping,
+ struct cl_page_list *queue, int flags)
{
+ struct vvp_io *vio = vvp_env_io(env);
+ struct vvp_thread_info *vti = vvp_env_info(env);
+ struct cl_attr *attr = ccc_env_thread_attr(env);
unsigned long start = 0, end = 0, reserved;
- unsigned long ra_end, len;
+ unsigned long ra_end, len;
struct inode *inode;
- struct lov_stripe_md *lsm;
struct ll_ra_read *bead;
- struct ost_lvb lvb;
- struct ra_io_arg ria = { 0 };
- int ret = 0;
+ struct ra_io_arg *ria = &vti->vti_ria;
+ struct ll_inode_info *lli;
+ struct cl_object *clob;
+ int ret = 0;
__u64 kms;
ENTRY;
inode = mapping->host;
- lsm = ll_i2info(inode)->lli_smd;
+ lli = ll_i2info(inode);
+ clob = lli->lli_clob;
+
+ memset(ria, 0, sizeof *ria);
- lov_stripe_lock(lsm);
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
- kms = lvb.lvb_size;
- lov_stripe_unlock(lsm);
+ cl_object_attr_lock(clob);
+ ret = cl_object_attr_get(env, clob, attr);
+ cl_object_attr_unlock(clob);
+
+ if (ret != 0)
+ RETURN(ret);
+ kms = attr->cat_kms;
if (kms == 0) {
ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
RETURN(0);
}
- spin_lock(&ras->ras_lock);
- bead = ll_ra_read_get_locked(ras);
+ spin_lock(&ras->ras_lock);
+ if (vio->cui_ra_window_set)
+ bead = &vio->cui_bead;
+ else
+ bead = NULL;
+
/* Enlarge the RA window to encompass the full read */
if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
bead->lrr_start + bead->lrr_count) {
ras->ras_window_len = bead->lrr_start + bead->lrr_count -
ras->ras_window_start;
}
- /* Reserve a part of the read-ahead window that we'll be issuing */
+ /* Reserve a part of the read-ahead window that we'll be issuing */
if (ras->ras_window_len) {
start = ras->ras_next_readahead;
end = ras->ras_window_start + ras->ras_window_len - 1;
}
if (end != 0) {
+ unsigned long rpc_boundary;
+ /*
+ * Align RA window to an optimal boundary.
+ *
+ * XXX This would be better to align to cl_max_pages_per_rpc
+ * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
+ * be aligned to the RAID stripe size in the future and that
+ * is more important than the RPC size.
+ */
+        /* Note: we only trim the RPC window to the boundary instead of
+         * extending it, so as to avoid reading too many pages during
+         * random reads. */
+ rpc_boundary = ((end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1)));
+ if (rpc_boundary > 0)
+ rpc_boundary--;
+
+ if (rpc_boundary > start)
+ end = rpc_boundary;
+
/* Truncate RA window to end of file */
end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
+
ras->ras_next_readahead = max(end, end + 1);
RAS_CDEBUG(ras);
}
- ria.ria_start = start;
- ria.ria_end = end;
- /* If stride I/O mode is detected, get stride window*/
- if (stride_io_mode(ras)) {
- ria.ria_length = ras->ras_stride_length;
- ria.ria_pages = ras->ras_stride_pages;
- }
- spin_unlock(&ras->ras_lock);
+ ria->ria_start = start;
+ ria->ria_end = end;
+ /* If stride I/O mode is detected, get stride window*/
+ if (stride_io_mode(ras)) {
+ ria->ria_stoff = ras->ras_stride_offset;
+ ria->ria_length = ras->ras_stride_length;
+ ria->ria_pages = ras->ras_stride_pages;
+ }
+ spin_unlock(&ras->ras_lock);
if (end == 0) {
ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
RETURN(0);
}
- len = ria_page_count(&ria);
+ len = ria_page_count(ria);
if (len == 0)
RETURN(0);
-
- reserved = ll_ra_count_get(ll_i2sbi(inode), len);
- if (reserved < end - start + 1)
+ reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
+ if (reserved < len)
ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
- CDEBUG(D_READA, "reserved page %lu \n", reserved);
-
- ret = ll_read_ahead_pages(exp, oig, &ria, &reserved, mapping, &ra_end);
+ CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
+ cfs_atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
+ ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
+
+ ret = ll_read_ahead_pages(env, io, queue,
+ ria, &reserved, mapping, &ra_end);
LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);

        if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
- ra_end, end, ria.ria_end);
-
- if (ra_end != (end + 1)) {
- spin_lock(&ras->ras_lock);
- if (ra_end < ras->ras_next_readahead &&
- index_in_window(ra_end, ras->ras_window_start, 0,
- ras->ras_window_len)) {
- ras->ras_next_readahead = ra_end;
- RAS_CDEBUG(ras);
- }
- spin_unlock(&ras->ras_lock);
- }
+ ra_end, end, ria->ria_end);
+
+ if (ra_end != end + 1) {
+ spin_lock(&ras->ras_lock);
+ if (ra_end < ras->ras_next_readahead &&
+ index_in_window(ra_end, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ ras->ras_next_readahead = ra_end;
+ RAS_CDEBUG(ras);
+ }
+ spin_unlock(&ras->ras_lock);
+ }
- RETURN(ret);
+ RETURN(ret);
}
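+
+/*
+ * Illustrative sketch (not part of the patch, names hypothetical) of the
+ * RPC-boundary trimming in ll_readahead() above: assuming rpc_pages is a
+ * power of two, the window end is rounded down to the last page before an
+ * RPC boundary, and only when that does not move it before the window start.
+ */
+#if 0 /* example only, never compiled */
+static unsigned long ra_trim_to_rpc(unsigned long start, unsigned long end,
+                                    unsigned long rpc_pages)
+{
+        unsigned long boundary = (end + 1) & ~(rpc_pages - 1);
+
+        if (boundary > 0)
+                boundary--;            /* last page of the preceding RPC */
+        if (boundary > start)
+                end = boundary;        /* trim, never extend */
+        return end;
+}
+#endif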
-static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
+static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
+ unsigned long index)
{
- ras->ras_window_start = index & (~(RAS_INCREASE_STEP - 1));
+ ras->ras_window_start = index & (~(RAS_INCREASE_STEP(inode) - 1));
}
/* called with the ras_lock held or from places where it doesn't matter */
-static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
+static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
+ unsigned long index)
{
- ras->ras_last_readpage = index;
- ras->ras_consecutive_requests = 0;
- ras->ras_consecutive_pages = 0;
- ras->ras_window_len = 0;
- ras_set_start(ras, index);
- ras->ras_next_readahead = max(ras->ras_window_start, index);
-
- RAS_CDEBUG(ras);
+ ras->ras_last_readpage = index;
+ ras->ras_consecutive_requests = 0;
+ ras->ras_consecutive_pages = 0;
+ ras->ras_window_len = 0;
+ ras_set_start(inode, ras, index);
+ ras->ras_next_readahead = max(ras->ras_window_start, index);
+
+ RAS_CDEBUG(ras);
}
/* called with the ras_lock held or from places where it doesn't matter */
static void ras_stride_reset(struct ll_readahead_state *ras)
{
ras->ras_consecutive_stride_requests = 0;
+ ras->ras_stride_length = 0;
+ ras->ras_stride_pages = 0;
RAS_CDEBUG(ras);
}
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
- spin_lock_init(&ras->ras_lock);
- ras_reset(ras, 0);
- ras->ras_requests = 0;
- INIT_LIST_HEAD(&ras->ras_read_beads);
+ spin_lock_init(&ras->ras_lock);
+ ras_reset(inode, ras, 0);
+ ras->ras_requests = 0;
+ CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
}
-/* Check whether the read request is in the stride window.
+/*
+ * Check whether the read request is in the stride window.
* If it is in the stride window, return 1, otherwise return 0.
- * and also update stride_gap and stride_pages.
*/
-static int index_in_stride_window(unsigned long index,
- struct ll_readahead_state *ras,
- struct inode *inode)
+static int index_in_stride_window(struct ll_readahead_state *ras,
+ unsigned long index)
{
- int stride_gap = index - ras->ras_last_readpage - 1;
-
- LASSERT(stride_gap != 0);
-
- if (ras->ras_consecutive_pages == 0)
- return 0;
-
- /*Otherwise check the stride by itself */
- if ((ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
- ras->ras_consecutive_pages == ras->ras_stride_pages)
- return 1;
-
- if (stride_gap >= 0) {
- /*
- * only set stride_pages, stride_length if
- * it is forward reading ( stride_gap > 0)
- */
+ unsigned long stride_gap;
+
+ if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
+ ras->ras_stride_pages == ras->ras_stride_length)
+ return 0;
+
+ stride_gap = index - ras->ras_last_readpage - 1;
+
+        /* If it is a contiguous read */
+ if (stride_gap == 0)
+ return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;
+
+ /* Otherwise check the stride by itself */
+ return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
+ ras->ras_consecutive_pages == ras->ras_stride_pages;
+}
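+
+/*
+ * Worked example for the check above: with ras_stride_length = 16 and
+ * ras_stride_pages = 4 the pattern is "read 4 pages, skip 12".  After
+ * reading pages 0-3 (ras_last_readpage == 3, ras_consecutive_pages == 4),
+ * a read at index 16 gives stride_gap = 16 - 3 - 1 = 12 ==
+ * stride_length - stride_pages, so the request is inside the stride window.
+ */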
+
+static void ras_update_stride_detector(struct ll_readahead_state *ras,
+ unsigned long index)
+{
+ unsigned long stride_gap = index - ras->ras_last_readpage - 1;
+
+ if (!stride_io_mode(ras) && (stride_gap != 0 ||
+ ras->ras_consecutive_stride_requests == 0)) {
ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
- } else {
- /*
- * If stride_gap < 0,(back_forward reading),
- * reset the stride_pages/length.
- * FIXME:back_ward stride I/O read.
- *
- */
- ras->ras_stride_pages = 0;
- ras->ras_stride_length = 0;
+                ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
}
- RAS_CDEBUG(ras);
+ LASSERT(ras->ras_request_index == 0);
+ LASSERT(ras->ras_consecutive_stride_requests == 0);
+
+        if (index <= ras->ras_last_readpage) {
+                /* Reset stride window for a backward or repeated read */
+                ras_stride_reset(ras);
+                return;
+        }
+
+ ras->ras_stride_pages = ras->ras_consecutive_pages;
+        ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
- return 0;
+ RAS_CDEBUG(ras);
+ return;
}
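+
+/*
+ * Worked example for the detector above: after reading pages 0-3
+ * (ras_consecutive_pages == 4, ras_last_readpage == 3), a read at index 16
+ * gives stride_gap = 16 - 3 - 1 = 12, so the detector records
+ * ras_stride_pages = 4 and ras_stride_length = 12 + 4 = 16, i.e. a
+ * "read 4 pages, skip 12" pattern.
+ */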
static unsigned long
stride_page_count(struct ll_readahead_state *ras, unsigned long len)
{
        return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
                               ras->ras_stride_pages, ras->ras_stride_offset,
                               len);
}

/* Stride Read-ahead window will be increased inc_len according to
* stride I/O pattern */
-static void ras_stride_increase_window(struct ll_readahead_state *ras,
+static void ras_stride_increase_window(struct ll_readahead_state *ras,
struct ll_ra_info *ra,
unsigned long inc_len)
{
        unsigned long left, step, window_len;
        unsigned long stride_len;
LASSERT(ras->ras_stride_length > 0);
+ LASSERTF(ras->ras_window_start + ras->ras_window_len
+ >= ras->ras_stride_offset, "window_start %lu, window_len %lu"
+ " stride_offset %lu\n", ras->ras_window_start,
+ ras->ras_window_len, ras->ras_stride_offset);
stride_len = ras->ras_window_start + ras->ras_window_len -
ras->ras_stride_offset;
- LASSERTF(stride_len > 0, "window_start %lu, window_len %lu"
- "stride_offset %lu\n", ras->ras_window_start,
- ras->ras_window_len, ras->ras_stride_offset);
-
left = stride_len % ras->ras_stride_length;
-
window_len = ras->ras_window_len - left;
-
+
if (left < ras->ras_stride_pages)
left += inc_len;
else
- left = ras->ras_stride_pages + inc_len;
+ left = ras->ras_stride_pages + inc_len;
        LASSERT(ras->ras_stride_pages != 0);
        /* split the grown tail into whole strides plus a remainder */
        step = left / ras->ras_stride_pages;
        left %= ras->ras_stride_pages;

        window_len += step * ras->ras_stride_length + left;
- if (stride_page_count(ras, window_len) <= ra->ra_max_pages)
+ if (stride_page_count(ras, window_len) <= ra->ra_max_pages_per_file)
ras->ras_window_len = window_len;
RAS_CDEBUG(ras);
}
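+
+/*
+ * Worked example for the growth above: stride_length = 16, stride_pages = 4
+ * and a window currently ending 8 pages into a stride (left = 8, so the
+ * trailing partial stride already holds all its data pages).  With
+ * inc_len = 4, left becomes 4 + 4 = 8, which splits into step = 2 whole
+ * strides and a remainder of 0, so the window length grows by
+ * 2 * 16 + 0 - 8 = 24 pages.
+ */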
-/* Set stride I/O read-ahead window start offset */
-static void ras_set_stride_offset(struct ll_readahead_state *ras)
+static void ras_increase_window(struct inode *inode,
+ struct ll_readahead_state *ras,
+ struct ll_ra_info *ra)
{
- unsigned long window_len = ras->ras_next_readahead -
- ras->ras_window_start;
- unsigned long left;
-
- LASSERT(ras->ras_stride_length != 0);
-
- left = window_len % ras->ras_stride_length;
-
- ras->ras_stride_offset = ras->ras_next_readahead - left;
-
- RAS_CDEBUG(ras);
+ /* The stretch of ra-window should be aligned with max rpc_size
+ * but current clio architecture does not support retrieve such
+ * information from lower layer. FIXME later
+ */
+ if (stride_io_mode(ras))
+ ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP(inode));
+ else
+ ras->ras_window_len = min(ras->ras_window_len +
+ RAS_INCREASE_STEP(inode),
+ ra->ra_max_pages_per_file);
}
-static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- unsigned hit)
+void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+ struct ll_readahead_state *ras, unsigned long index,
+ unsigned hit)
{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- int zero = 0, stride_zero = 0, stride_detect = 0, ra_miss = 0;
- ENTRY;
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ int zero = 0, stride_detect = 0, ra_miss = 0;
+ ENTRY;
- spin_lock(&sbi->ll_lock);
- spin_lock(&ras->ras_lock);
+ spin_lock(&ras->ras_lock);
- ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
+ ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM
         * is reclaiming it before we get to it. */
if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
zero = 1;
- ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
- /* check whether it is in stride I/O mode*/
- if (!index_in_stride_window(index, ras, inode))
- stride_zero = 1;
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
} else if (!hit && ras->ras_window_len &&
index < ras->ras_next_readahead &&
index_in_window(index, ras->ras_window_start, 0,
ras->ras_window_len)) {
- zero = 1;
- ra_miss = 1;
- /* If it hits read-ahead miss and the stride I/O is still
- * not detected, reset stride stuff to re-detect the whole
- * stride I/O mode to avoid complication */
- if (!stride_io_mode(ras))
- stride_zero = 1;
- ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
+ ra_miss = 1;
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
}
/* On the second access to a file smaller than the tunable
* ra_max_read_ahead_whole_pages trigger RA on all pages in the
- * file up to ra_max_pages. This is simply a best effort and
- * only occurs once per open file. Normal RA behavior is reverted
+ * file up to ra_max_pages_per_file. This is simply a best effort
+ * and only occurs once per open file. Normal RA behavior is reverted
* to for subsequent IO. The mmap case does not increment
* ras_requests and thus can never trigger this behavior. */
if (ras->ras_requests == 2 && !ras->ras_request_index) {
                __u64 kms_pages;

                kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
                            CFS_PAGE_SHIFT;

CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
- ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);
+ ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
if (kms_pages &&
kms_pages <= ra->ra_max_read_ahead_whole_pages) {
ras->ras_window_start = 0;
ras->ras_last_readpage = 0;
ras->ras_next_readahead = 0;
- ras->ras_window_len = min(ra->ra_max_pages,
+ ras->ras_window_len = min(ra->ra_max_pages_per_file,
ra->ra_max_read_ahead_whole_pages);
GOTO(out_unlock, 0);
}
}
+ if (zero) {
+ /* check whether it is in stride I/O mode*/
+ if (!index_in_stride_window(ras, index)) {
+ if (ras->ras_consecutive_stride_requests == 0 &&
+ ras->ras_request_index == 0) {
+ ras_update_stride_detector(ras, index);
+ ras->ras_consecutive_stride_requests++;
+ } else {
+ ras_stride_reset(ras);
+ }
+ ras_reset(inode, ras, index);
+ ras->ras_consecutive_pages++;
+ GOTO(out_unlock, 0);
+ } else {
+ ras->ras_consecutive_pages = 0;
+ ras->ras_consecutive_requests = 0;
+ if (++ras->ras_consecutive_stride_requests > 1)
+ stride_detect = 1;
+ RAS_CDEBUG(ras);
+ }
+ } else {
+ if (ra_miss) {
+ if (index_in_stride_window(ras, index) &&
+ stride_io_mode(ras)) {
+                                /* If stride-RA hits a cache miss, the stride
+                                 * detector is not reset, to avoid the overhead
+                                 * of re-detecting the read-ahead mode */
+ if (index != ras->ras_last_readpage + 1)
+ ras->ras_consecutive_pages = 0;
+ ras_reset(inode, ras, index);
+ RAS_CDEBUG(ras);
+ } else {
+ /* Reset both stride window and normal RA
+ * window */
+ ras_reset(inode, ras, index);
+ ras->ras_consecutive_pages++;
+ ras_stride_reset(ras);
+ GOTO(out_unlock, 0);
+ }
+ } else if (stride_io_mode(ras)) {
+                        /* If this is a contiguous read but we are currently
+                         * in stride I/O mode, check whether the stride step
+                         * is still valid; if not, reset the stride RA window */
+ if (!index_in_stride_window(ras, index)) {
+ /* Shrink stride read-ahead window to be zero */
+ ras_stride_reset(ras);
+ ras->ras_window_len = 0;
+ ras->ras_next_readahead = index;
+ }
+ }
+ }
+ ras->ras_consecutive_pages++;
+ ras->ras_last_readpage = index;
+ ras_set_start(inode, ras, index);
+
+ if (stride_io_mode(ras))
+                /* Since stride read-ahead is sensitive to the read offset,
+                 * use the original offset here instead of ras_window_start,
+                 * which is RPC aligned */
+ ras->ras_next_readahead = max(index, ras->ras_next_readahead);
+ else
+ ras->ras_next_readahead = max(ras->ras_window_start,
+ ras->ras_next_readahead);
+ RAS_CDEBUG(ras);
+
+ /* Trigger RA in the mmap case where ras_consecutive_requests
+ * is not incremented and thus can't be used to trigger RA */
+ if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
+ ras->ras_window_len = RAS_INCREASE_STEP(inode);
+ GOTO(out_unlock, 0);
+ }
- if (zero) {
- /* If it is discontinuous read, check
- * whether it is stride I/O mode*/
- if (stride_zero) {
- ras_reset(ras, index);
- ras->ras_consecutive_pages++;
- ras_stride_reset(ras);
- RAS_CDEBUG(ras);
- GOTO(out_unlock, 0);
- } else {
- /* The read is still in stride window or
- * it hits read-ahead miss */
-
- /* If ra-window miss is hitted, which probably means VM
- * pressure, and some read-ahead pages were reclaimed.So
- * the length of ra-window will not increased, but also
- * not reset to avoid redetecting the stride I/O mode.*/
- ras->ras_consecutive_requests = 0;
- if (!ra_miss) {
- ras->ras_consecutive_pages = 0;
- if (++ras->ras_consecutive_stride_requests > 1)
- stride_detect = 1;
- }
- RAS_CDEBUG(ras);
- }
- } else if (ras->ras_consecutive_stride_requests > 1) {
- /* If this is contiguous read but in stride I/O mode
- * currently, check whether stride step still is valid,
- * if invalid, it will reset the stride ra window*/
- if (ras->ras_consecutive_pages + 1 > ras->ras_stride_pages)
- ras_stride_reset(ras);
- }
-
- ras->ras_last_readpage = index;
- ras->ras_consecutive_pages++;
- ras_set_start(ras, index);
- ras->ras_next_readahead = max(ras->ras_window_start,
- ras->ras_next_readahead);
- RAS_CDEBUG(ras);
-
- /* Trigger RA in the mmap case where ras_consecutive_requests
- * is not incremented and thus can't be used to trigger RA */
- if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
- ras->ras_window_len = RAS_INCREASE_STEP;
- GOTO(out_unlock, 0);
- }
+        /* Initially reset the stride window offset to next_readahead */
+ if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
+ /**
+ * Once stride IO mode is detected, next_readahead should be
+ * reset to make sure next_readahead > stride offset
+ */
+ ras->ras_next_readahead = max(index, ras->ras_next_readahead);
+ ras->ras_stride_offset = index;
+ ras->ras_window_len = RAS_INCREASE_STEP(inode);
+ }
- /* Initially reset the stride window offset to next_readahead*/
- if (ras->ras_consecutive_stride_requests == 2 && stride_detect)
- ras_set_stride_offset(ras);
-
- /* The initial ras_window_len is set to the request size. To avoid
- * uselessly reading and discarding pages for random IO the window is
- * only increased once per consecutive request received. */
- if ((ras->ras_consecutive_requests > 1 &&
- !ras->ras_request_index) || stride_detect) {
- if (stride_io_mode(ras))
- ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP);
- else
- ras->ras_window_len = min(ras->ras_window_len +
- RAS_INCREASE_STEP,
- ra->ra_max_pages);
- }
- EXIT;
+ /* The initial ras_window_len is set to the request size. To avoid
+ * uselessly reading and discarding pages for random IO the window is
+ * only increased once per consecutive request received. */
+ if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
+ !ras->ras_request_index)
+ ras_increase_window(inode, ras, ra);
+ EXIT;
out_unlock:
- RAS_CDEBUG(ras);
- ras->ras_request_index++;
- spin_unlock(&ras->ras_lock);
- spin_unlock(&sbi->ll_lock);
- return;
+ RAS_CDEBUG(ras);
+ ras->ras_request_index++;
+ spin_unlock(&ras->ras_lock);
+ return;
}
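+
+/*
+ * A rough summary of the decision tree above (branches as written, no new
+ * behaviour implied):
+ *
+ *   distant read (zero):
+ *     not in stride window -> (re)seed or reset the stride detector and
+ *                             reset the RA window;
+ *     in stride window     -> count a stride request; two in a row set
+ *                             stride_detect and enter stride I/O mode.
+ *   RA miss inside the window (ra_miss):
+ *     in stride window and in stride mode -> reset the base window but
+ *                                            keep the stride detector;
+ *     otherwise                           -> reset both windows.
+ *   contiguous read while in stride mode:
+ *     stride step no longer valid -> shrink the stride window to zero.
+ */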
-int ll_writepage(struct page *page)
+int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
{
- struct inode *inode = page->mapping->host;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_export *exp;
- struct ll_async_page *llap;
- int rc = 0;
+ struct inode *inode = vmpage->mapping->host;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ struct cl_object *clob;
+ struct cl_env_nest nest;
+ bool redirtied = false;
+ bool unlocked = false;
+ int result;
ENTRY;
- LASSERT(PageLocked(page));
-
- exp = ll_i2dtexp(inode);
- if (exp == NULL)
- GOTO(out, rc = -EINVAL);
-
- llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
- if (IS_ERR(llap))
- GOTO(out, rc = PTR_ERR(llap));
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageWriteback(vmpage));
+
+ LASSERT(ll_i2dtexp(inode) != NULL);
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ GOTO(out, result = PTR_ERR(env));
+
+ clob = ll_i2info(inode)->lli_clob;
+ LASSERT(clob != NULL);
+
+ io = ccc_env_thread_io(env);
+ io->ci_obj = clob;
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, clob);
+ if (result == 0) {
+ page = cl_page_find(env, clob, vmpage->index,
+ vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ lu_ref_add(&page->cp_reference, "writepage",
+ cfs_current());
+ cl_page_assume(env, io, page);
+ result = cl_page_flush(env, io, page);
+ if (result != 0) {
+ /*
+ * Re-dirty page on error so it retries write,
+ * but not in case when IO has actually
+ * occurred and completed with an error.
+ */
+ if (!PageError(vmpage)) {
+ redirty_page_for_writepage(wbc, vmpage);
+ result = 0;
+ redirtied = true;
+ }
+ }
+ cl_page_disown(env, io, page);
+ unlocked = true;
+ lu_ref_del(&page->cp_reference,
+ "writepage", cfs_current());
+ cl_page_put(env, page);
+ } else {
+ result = PTR_ERR(page);
+ }
+ }
+ cl_io_fini(env, io);
+
+ if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
+ loff_t offset = cl_offset(clob, vmpage->index);
+
+                /* Flushing the page failed because the extent is being
+                 * written out.  Wait for that write to finish to avoid
+                 * breaking the kernel, which assumes ->writepage marks
+                 * PageWriteback or cleans the page. */
+ result = cl_sync_file_range(inode, offset,
+ offset + CFS_PAGE_SIZE - 1,
+ CL_FSYNC_LOCAL);
+ if (result > 0) {
+                        /* We may actually have written more than one page;
+                         * subtract this page because the caller will count
+                         * it. */
+ wbc->nr_to_write -= result - 1;
+ result = 0;
+ }
+ }
- LASSERT(!PageWriteback(page));
- set_page_writeback(page);
+ cl_env_nested_put(&nest, env);
+ GOTO(out, result);
- page_cache_get(page);
- if (llap->llap_write_queued) {
- LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
- rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
- llap->llap_cookie,
- ASYNC_READY | ASYNC_URGENT);
- } else {
- rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
- ASYNC_READY | ASYNC_URGENT);
- }
- if (rc)
- page_cache_release(page);
out:
- if (rc) {
- if (!lli->lli_async_rc)
- lli->lli_async_rc = rc;
- /* re-dirty page on error so it retries write */
- if (PageWriteback(page)) {
- end_page_writeback(page);
- }
- /* resend page only for not started IO*/
- if (!PageError(page))
- ll_redirty_page(page);
- unlock_page(page);
- }
- RETURN(rc);
+ if (result < 0) {
+ if (!lli->lli_async_rc)
+ lli->lli_async_rc = result;
+ SetPageError(vmpage);
+ if (!unlocked)
+ unlock_page(vmpage);
+ }
+ return result;
}
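+
+/*
+ * Note on the WB_SYNC_ALL fallback above: a caller such as fsync expects a
+ * successful ->writepage to have either started writeback or cleaned the
+ * page, so merely re-dirtying it is not enough; the synchronous
+ * cl_sync_file_range() call instead waits for the conflicting extent write.
+ */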
-/*
- * for now we do our readpage the same on both 2.4 and 2.5. The kernel's
- * read-ahead assumes it is valid to issue readpage all the way up to
- * i_size, but our dlm locks make that not the case. We disable the
- * kernel's read-ahead and do our own by walking ahead in the page cache
- * checking for dlm lock coverage. the main difference between 2.4 and
- * 2.6 is how read-ahead gets batched and issued, but we're using our own,
- * so they look the same.
- */
-int ll_readpage(struct file *filp, struct page *page)
+int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
- struct inode *inode = page->mapping->host;
- struct obd_export *exp;
- struct ll_async_page *llap;
- struct obd_io_group *oig = NULL;
- int rc;
- ENTRY;
-
- LASSERT(PageLocked(page));
- LASSERT(!PageUptodate(page));
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
- inode->i_ino, inode->i_generation, inode,
- (((loff_t)page->index) << CFS_PAGE_SHIFT),
- (((loff_t)page->index) << CFS_PAGE_SHIFT));
- LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
-
- if (!ll_i2info(inode)->lli_smd) {
- /* File with no objects - one big hole */
- /* We use this just for remove_from_page_cache that is not
- * exported, we'd make page back up to date. */
- ll_truncate_complete_page(page);
- clear_page(kmap(page));
- kunmap(page);
- SetPageUptodate(page);
- unlock_page(page);
- RETURN(0);
- }
-
- rc = oig_init(&oig);
- if (rc < 0)
- GOTO(out, rc);
-
- exp = ll_i2dtexp(inode);
- if (exp == NULL)
- GOTO(out, rc = -EINVAL);
-
- llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
- if (IS_ERR(llap))
- GOTO(out, rc = PTR_ERR(llap));
-
- if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
- ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
- llap->llap_defer_uptodate);
-
-
- if (llap->llap_defer_uptodate) {
- /* This is the callpath if we got the page from a readahead */
- llap->llap_ra_used = 1;
- rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
- fd->fd_flags);
- if (rc > 0)
- obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
- NULL, oig);
- LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
- SetPageUptodate(page);
- unlock_page(page);
- GOTO(out_oig, rc = 0);
- }
+ struct inode *inode = mapping->host;
+ loff_t start;
+ loff_t end;
+ enum cl_fsync_mode mode;
+ int range_whole = 0;
+ int result;
+ ENTRY;
+
+ if (wbc->range_cyclic) {
+ start = mapping->writeback_index << CFS_PAGE_SHIFT;
+ end = OBD_OBJECT_EOF;
+ } else {
+ start = wbc->range_start;
+ end = wbc->range_end;
+ if (end == LLONG_MAX) {
+ end = OBD_OBJECT_EOF;
+ range_whole = start == 0;
+ }
+ }
- if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
- rc = ll_page_matches(page, fd->fd_flags);
- if (rc < 0) {
- LL_CDEBUG_PAGE(D_ERROR, page,
- "lock match failed: rc %d\n", rc);
- GOTO(out, rc);
- }
+ mode = CL_FSYNC_NONE;
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ mode = CL_FSYNC_LOCAL;
- if (rc == 0) {
- CWARN("ino %lu page %lu (%llu) not covered by "
- "a lock (mmap?). check debug logs.\n",
- inode->i_ino, page->index,
- (long long)page->index << CFS_PAGE_SHIFT);
- }
- }
+ result = cl_sync_file_range(inode, start, end, mode);
+ if (result > 0) {
+ wbc->nr_to_write -= result;
+ result = 0;
+ }
- rc = ll_issue_page_read(exp, llap, oig, 0);
- if (rc)
- GOTO(out, rc);
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
+ if (end == OBD_OBJECT_EOF)
+ end = i_size_read(inode);
+ mapping->writeback_index = (end >> CFS_PAGE_SHIFT) + 1;
+ }
+ RETURN(result);
+}
- LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
- /* We have just requested the actual page we want, see if we can tack
- * on some readahead to that page's RPC before it is sent. */
- if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
- ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
- fd->fd_flags);
+int ll_readpage(struct file *file, struct page *vmpage)
+{
+ struct ll_cl_context *lcc;
+ int result;
+ ENTRY;
- rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
+ lcc = ll_cl_init(file, vmpage, 0);
+ if (!IS_ERR(lcc)) {
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
-out:
- if (rc)
- unlock_page(page);
-out_oig:
- if (oig != NULL)
- oig_release(oig);
- RETURN(rc);
+ LASSERT(page->cp_type == CPT_CACHEABLE);
+ if (likely(!PageUptodate(vmpage))) {
+ cl_page_assume(env, io, page);
+ result = cl_io_read_page(env, io, page);
+ } else {
+ /* Page from a non-object file. */
+ LASSERT(!ll_i2info(vmpage->mapping->host)->lli_has_smd);
+ unlock_page(vmpage);
+ result = 0;
+ }
+ ll_cl_fini(lcc);
+ } else {
+ unlock_page(vmpage);
+ result = PTR_ERR(lcc);
+ }
+ RETURN(result);
}
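+
+/*
+ * Illustrative sketch (not part of the patch) of the ll_cl_init()/
+ * ll_cl_fini() bracket used by ll_readpage() above, shown as a minimal
+ * caller; the PageUptodate fast path and error logging are elided.
+ */
+#if 0 /* example only, never compiled */
+static int ll_readpage_sketch(struct file *file, struct page *vmpage)
+{
+        struct ll_cl_context *lcc = ll_cl_init(file, vmpage, 0);
+        int rc;
+
+        if (IS_ERR(lcc)) {
+                unlock_page(vmpage);    /* ->readpage must unlock on error */
+                return PTR_ERR(lcc);
+        }
+        cl_page_assume(lcc->lcc_env, lcc->lcc_io, lcc->lcc_page);
+        rc = cl_io_read_page(lcc->lcc_env, lcc->lcc_io, lcc->lcc_page);
+        ll_cl_fini(lcc);
+        return rc;
+}
+#endif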
+