/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Lustre Lite I/O Page Cache
+ * Lustre Lite I/O page cache routines shared by different kernel revs
*
- * Copyright (C) 2002 Cluster File Systems, Inc.
+ * Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
+#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
-#include <linux/iobuf.h>
#include <linux/errno.h>
-#include <linux/locks.h>
+#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#define DEBUG_SUBSYSTEM S_LLITE
-#include <linux/lustre_mds.h>
-#include <linux/lustre_lite.h>
-#include <linux/lustre_lib.h>
+//#include <lustre_mdc.h>
+#include <lustre_lite.h>
+#include "llite_internal.h"
+#include <linux/lustre_compat25.h>
+
+#ifndef list_for_each_prev_safe
+#define list_for_each_prev_safe(pos, n, head) \
+ for (pos = (head)->prev, n = pos->prev; pos != (head); \
+ pos = n, n = pos->prev )
+#endif
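+
+/* (backwards-walking analogue of list_for_each_safe(): pos may be removed
+ * from the list safely because n already points at the previous entry) */
+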
+cfs_mem_cache_t *ll_async_page_slab = NULL;
+size_t ll_async_page_slab_size = 0;
/* SYNCHRONOUS I/O to object storage for an inode */
-static int ll_brw(int rw, struct inode *inode, struct page *page, int create)
+static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
+ struct page *page, int flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *md = lli->lli_smd;
+ struct lov_stripe_md *lsm = lli->lli_smd;
+ struct obd_info oinfo = { { { 0 } } };
struct brw_page pg;
- int err;
- struct io_cb_data *cbd = ll_init_cb();
+ int opc, rc;
ENTRY;
- if (!cbd)
- RETURN(-ENOMEM);
-
pg.pg = page;
- pg.count = PAGE_SIZE;
- pg.off = ((obd_off)page->index) << PAGE_SHIFT;
- pg.flag = create ? OBD_BRW_CREATE : 0;
+ pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;
- err = obd_brw(rw, ll_i2obdconn(inode), md, 1, &pg, ll_sync_io_cb, cbd);
+ if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
+ pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
+ else
+ pg.count = CFS_PAGE_SIZE;
+
+ LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
+ cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
+ inode->i_ino, pg.off, pg.off);
+ if (pg.count == 0) {
+ CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
+ LPU64"\n", inode->i_ino, inode, i_size_read(inode),
+ page->mapping->host, i_size_read(page->mapping->host),
+ page->index, pg.off);
+ }
- RETURN(err);
-} /* ll_brw */
+ pg.flag = flags;
-/* returns the page unlocked, but with a reference */
-static int ll_readpage(struct file *file, struct page *page)
+ if (cmd & OBD_BRW_WRITE)
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
+ pg.count);
+ else
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
+ pg.count);
+ oinfo.oi_oa = oa;
+ oinfo.oi_md = lsm;
+ /* NB partial write, so we might not have CAPA_OPC_OSS_READ capa */
+ opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
+ oinfo.oi_capa = ll_osscapa_get(inode, opc);
+ rc = obd_brw(cmd, ll_i2dtexp(inode), &oinfo, 1, &pg, NULL);
+ capa_put(oinfo.oi_capa);
+ if (rc == 0)
+ obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
+ else if (rc != -EIO)
+ CERROR("error from obd_brw: rc = %d\n", rc);
+ RETURN(rc);
+}
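+
+/* A worked example of the tail-page arithmetic above (hypothetical sizes,
+ * assuming 4096-byte pages): writing the page at index 1 of a 5000-byte
+ * file gives pg.off = 4096 and, since pg.off + CFS_PAGE_SIZE exceeds
+ * i_size, pg.count = 5000 % 4096 = 904, so the RPC only carries the bytes
+ * inside i_size. */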
+
+/* this isn't where truncate starts. roughly:
+ * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
+ * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
+ * avoid races.
+ *
+ * must be called under ->lli_size_sem */
+void ll_truncate(struct inode *inode)
{
- struct inode *inode = page->mapping->host;
- obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
- int rc = 0;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_info oinfo = { { { 0 } } };
+ struct ost_lvb lvb;
+ struct obdo oa;
+ int rc;
ENTRY;
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
+ inode->i_generation, inode, i_size_read(inode),
+ i_size_read(inode));
- if (!PageLocked(page))
- LBUG();
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
+ if (lli->lli_size_sem_owner != current) {
+ EXIT;
+ return;
+ }
- if (inode->i_size <= offset) {
- memset(kmap(page), 0, PAGE_SIZE);
- kunmap(page);
- GOTO(readpage_out, rc);
+ if (!lli->lli_smd) {
+ CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
+ inode->i_ino);
+ GOTO(out_unlock, 0);
}
- if (Page_Uptodate(page)) {
- CERROR("Explain this please?\n");
- GOTO(readpage_out, rc);
+ LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
+
+ /* XXX I'm pretty sure this is a hack to paper over a more fundamental
+ * race condition. */
+ lov_stripe_lock(lli->lli_smd);
+ inode_init_lvb(inode, &lvb);
+ obd_merge_lvb(ll_i2dtexp(inode), lli->lli_smd, &lvb, 0);
+ if (lvb.lvb_size == i_size_read(inode)) {
+ CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
+ lli->lli_smd->lsm_object_id, i_size_read(inode),
+ i_size_read(inode));
+ lov_stripe_unlock(lli->lli_smd);
+ GOTO(out_unlock, 0);
}
- rc = ll_brw(OBD_BRW_READ, inode, page, 0);
- EXIT;
+ obd_adjust_kms(ll_i2dtexp(inode), lli->lli_smd, i_size_read(inode), 1);
+ lov_stripe_unlock(lli->lli_smd);
+
+ if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
+ (i_size_read(inode) & ~CFS_PAGE_MASK))) {
+ /* If the truncate leaves behind a partial page, update its
+ * checksum. */
+ struct page *page = find_get_page(inode->i_mapping,
+ i_size_read(inode) >>
+ CFS_PAGE_SHIFT);
+ if (page != NULL) {
+ struct ll_async_page *llap = llap_cast_private(page);
+ if (llap != NULL) {
+ llap->llap_checksum =
+ crc32_le(0, kmap(page), CFS_PAGE_SIZE);
+ kunmap(page);
+ }
+ page_cache_release(page);
+ }
+ }
- readpage_out:
- if (!rc)
- SetPageUptodate(page);
- UnlockPage(page);
- return 0;
-} /* ll_readpage */
+ CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
+ lli->lli_smd->lsm_object_id, i_size_read(inode), i_size_read(inode));
+
+ oinfo.oi_md = lli->lli_smd;
+ oinfo.oi_policy.l_extent.start = i_size_read(inode);
+ oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
+ oinfo.oi_oa = &oa;
+ oa.o_id = lli->lli_smd->lsm_object_id;
+ oa.o_gr = lli->lli_smd->lsm_object_gr;
+ oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+ obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLFID | OBD_MD_FLGENER);
+ ll_inode_size_unlock(inode, 0);
-static int ll_prepare_write(struct file *file, struct page *page, unsigned from,
- unsigned to)
+ oinfo.oi_capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
+ rc = obd_punch_rqset(ll_i2dtexp(inode), &oinfo, NULL);
+ ll_truncate_free_capa(oinfo.oi_capa);
+ if (rc)
+ CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
+ else
+ obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
+ EXIT;
+ return;
+
+ out_unlock:
+ ll_inode_size_unlock(inode, 0);
+} /* ll_truncate */
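+
+/* A worked example (hypothetical sizes, 4096-byte pages): truncating a
+ * three-page file down to 6000 bytes punches the extent
+ * [6000, OBD_OBJECT_EOF] on the storage targets, and with LL_SBI_CHECKSUM
+ * set the now-partial page at index 6000 >> CFS_PAGE_SHIFT = 1 has its
+ * llap_checksum recomputed over the full CFS_PAGE_SIZE page first. */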
+
+int ll_prepare_write(struct file *file, struct page *page, unsigned from,
+ unsigned to)
{
struct inode *inode = page->mapping->host;
- obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lov_stripe_md *lsm = lli->lli_smd;
+ obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
+ struct obd_info oinfo = { { { 0 } } };
+ struct brw_page pga;
+ struct obdo oa;
+ struct ost_lvb lvb;
int rc = 0;
- char *addr;
ENTRY;
- addr = kmap(page);
- if (!PageLocked(page))
- LBUG();
-
- if (Page_Uptodate(page))
- GOTO(prepare_done, rc);
+ LASSERT(PageLocked(page));
+ (void)llap_cast_private(page); /* assertion */
+
+ /* Check to see if we should return -EIO right away */
+ pga.pg = page;
+ pga.off = offset;
+ pga.count = CFS_PAGE_SIZE;
+ pga.flag = 0;
+
+ oa.o_mode = inode->i_mode;
+ oa.o_id = lsm->lsm_object_id;
+ oa.o_gr = lsm->lsm_object_gr;
+ oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
+ OBD_MD_FLTYPE | OBD_MD_FLGROUP;
+ obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);
+
+ oinfo.oi_oa = &oa;
+ oinfo.oi_md = lsm;
+ rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oinfo, 1, &pga, NULL);
+ if (rc)
+ RETURN(rc);
+
+ if (PageUptodate(page)) {
+ LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
+ RETURN(0);
+ }
/* We're completely overwriting an existing page, so _don't_ set it up
* to date until commit_write */
- if (from == 0 && to == PAGE_SIZE)
+ if (from == 0 && to == CFS_PAGE_SIZE) {
+ LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
+ POISON_PAGE(page, 0x11);
RETURN(0);
+ }
- /* We are writing to a new page, no need to read old data */
- if (inode->i_size <= offset) {
- memset(addr, 0, PAGE_SIZE);
- goto prepare_done;
+ /* If are writing to a new page, no need to read old data. The extent
+ * locking will have updated the KMS, and for our purposes here we can
+ * treat it like i_size. */
+ lov_stripe_lock(lsm);
+ inode_init_lvb(inode, &lvb);
+ obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
+ lov_stripe_unlock(lsm);
+ if (lvb.lvb_size <= offset) {
+ LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
+ lvb.lvb_size, offset);
+ memset(kmap(page), 0, CFS_PAGE_SIZE);
+ kunmap(page);
+ GOTO(prepare_done, rc = 0);
}
- rc = ll_brw(OBD_BRW_READ, inode, page, 0);
+ /* XXX could be an async ocp read.. read-ahead? */
+ rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
+ if (rc == 0) {
+ /* bug 1598: don't clobber blksize */
+ oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
+ obdo_refresh_inode(inode, &oa, oa.o_valid);
+ }
EXIT;
prepare_done:
- if (!rc)
+ if (rc == 0)
SetPageUptodate(page);
return rc;
}
-/* returns the page unlocked, but with a reference */
-static int ll_writepage(struct page *page)
+static int ll_ap_make_ready(void *data, int cmd)
+{
+ struct ll_async_page *llap;
+ struct page *page;
+ ENTRY;
+
+ llap = LLAP_FROM_COOKIE(data);
+ page = llap->llap_page;
+
+ LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n", cmd, page,
+ page->mapping->host->i_ino, page->index);
+
+ /* we're trying to write, but the page is locked.. come back later */
+ if (TryLockPage(page))
+ RETURN(-EAGAIN);
+
+ LASSERT(!PageWriteback(page));
+
+ /* if we left PageDirty we might get another writepage call
+ * in the future. list walkers are bright enough
+ * to check page dirty so we can leave it on whatever list
+ * it's on. XXX also, we're called with the cli list so if
+ * we got the page cache list we'd create a lock inversion
+ * with the removepage path which gets the page lock then the
+ * cli lock */
+ LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
+ page->mapping->host->i_ino, page->index);
+ clear_page_dirty_for_io(page);
+
+ /* This actually clears the dirty bit in the radix tree. */
+ set_page_writeback(page);
+
+ LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
+ page_cache_get(page);
+
+ RETURN(0);
+}
+
+/* We have two reasons for giving llite the opportunity to change the
+ * write length of a given queued page as it builds the RPC containing
+ * the page:
+ *
+ * 1) Further extending writes may have landed in the page cache
+ * since a partial write first queued this page requiring us
+ * to write more from the page cache. (No further races are possible, since
+ * by the time this is called, the page is locked.)
+ * 2) We might have raced with truncate and want to avoid performing
+ * write RPCs that are just going to be thrown away by the
+ * truncate's punch on the storage targets.
+ *
+ * The kms serves these purposes as it is set at both truncate and extending
+ * writes.
+ */
+static int ll_ap_refresh_count(void *data, int cmd)
+{
+ struct ll_inode_info *lli;
+ struct ll_async_page *llap;
+ struct lov_stripe_md *lsm;
+ struct page *page;
+ struct inode *inode;
+ struct ost_lvb lvb;
+ __u64 kms;
+ ENTRY;
+
+ /* readpage queues with _COUNT_STABLE, shouldn't get here. */
+ LASSERT(cmd != OBD_BRW_READ);
+
+ llap = LLAP_FROM_COOKIE(data);
+ page = llap->llap_page;
+ inode = page->mapping->host;
+ lli = ll_i2info(inode);
+ lsm = lli->lli_smd;
+
+ lov_stripe_lock(lsm);
+ inode_init_lvb(inode, &lvb);
+ obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
+ kms = lvb.lvb_size;
+ lov_stripe_unlock(lsm);
+
+ /* catch race with truncate */
+ if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
+ return 0;
+
+ /* catch sub-page write at end of file */
+ if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
+ return kms % CFS_PAGE_SIZE;
+
+ return CFS_PAGE_SIZE;
+}
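+
+/* Illustrating the three cases above (hypothetical numbers, 4096-byte
+ * pages): with kms = 10000, the page at index 3 (offset 12288 >= kms) lost
+ * a race with truncate and writes 0 bytes; the page at index 2 (offset
+ * 8192, but 8192 + 4096 > kms) is the sub-page tail and writes
+ * kms % CFS_PAGE_SIZE = 1808 bytes; pages below that write a full
+ * CFS_PAGE_SIZE. */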
+
+void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
+ struct lov_stripe_md *lsm;
+ obd_flag valid_flags;
+
+ lsm = ll_i2info(inode)->lli_smd;
+
+ oa->o_id = lsm->lsm_object_id;
+ oa->o_gr = lsm->lsm_object_gr;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+ valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
+ if (cmd & OBD_BRW_WRITE) {
+ oa->o_valid |= OBD_MD_FLEPOCH;
+ oa->o_easize = ll_i2info(inode)->lli_ioepoch;
+
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID |
+ OBD_MD_FLFID | OBD_MD_FLGENER;
+ }
+
+ obdo_from_inode(oa, inode, valid_flags);
+}
+
+static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
+{
+ struct ll_async_page *llap;
+ ENTRY;
+
+ llap = LLAP_FROM_COOKIE(data);
+ ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
+
+ EXIT;
+}
+
+static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
+ obd_valid valid)
+{
+ struct ll_async_page *llap;
+ ENTRY;
+
+ llap = LLAP_FROM_COOKIE(data);
+ obdo_from_inode(oa, llap->llap_page->mapping->host, valid);
+
+ EXIT;
+}
+
+static struct obd_capa *ll_ap_lookup_capa(void *data, int cmd)
+{
+ struct ll_async_page *llap = LLAP_FROM_COOKIE(data);
+ int opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
+
+ return ll_osscapa_get(llap->llap_page->mapping->host, opc);
+}
+
+static struct obd_async_page_ops ll_async_page_ops = {
+ .ap_make_ready = ll_ap_make_ready,
+ .ap_refresh_count = ll_ap_refresh_count,
+ .ap_fill_obdo = ll_ap_fill_obdo,
+ .ap_update_obdo = ll_ap_update_obdo,
+ .ap_completion = ll_ap_completion,
+ .ap_lookup_capa = ll_ap_lookup_capa,
+};
+
+struct ll_async_page *llap_cast_private(struct page *page)
+{
+ struct ll_async_page *llap = (struct ll_async_page *)page_private(page);
+
+ LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
+ "page %p private %lu gave magic %d which != %d\n",
+ page, page_private(page), llap->llap_magic, LLAP_MAGIC);
+
+ return llap;
+}
+
+/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
+ *
+ * There is an llap attached onto every page in lustre, linked off @sbi.
+ * We add an llap to the list so we don't lose our place during list walking.
+ * If llaps in the list are being moved they will only move to the end
+ * of the LRU, and we aren't terribly interested in those pages here (we
+ * start at the beginning of the list where the least-used llaps are.)
+ */
+int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
+{
+ struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
+ unsigned long total, want, count = 0;
+
+ total = sbi->ll_async_page_count;
+
+ /* There can be a large number of llaps (600k or more in a large
+ * memory machine) so the VM 1/6 shrink ratio is likely too much.
+ * Since we are freeing pages also, we don't necessarily want to
+ * shrink so much. Limit to 40MB of pages + llaps per call. */
+ if (shrink_fraction == 0)
+ want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
+ else
+ want = (total + shrink_fraction - 1) / shrink_fraction;
+
+ if (want > 40 << (20 - CFS_PAGE_SHIFT))
+ want = 40 << (20 - CFS_PAGE_SHIFT);
+
+ CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
+ want, total, shrink_fraction);
+
+ spin_lock(&sbi->ll_lock);
+ list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
+
+ /* "--total >= 0" is always true for unsigned total, so bound the
+ * scan by testing before the decrement */
+ while (total-- > 0 && count < want) {
+ struct page *page;
+ int keep;
+
+ if (unlikely(need_resched())) {
+ spin_unlock(&sbi->ll_lock);
+ cond_resched();
+ spin_lock(&sbi->ll_lock);
+ }
+
+ llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
+ list_del_init(&dummy_llap.llap_pglist_item);
+ if (llap == NULL)
+ break;
+
+ page = llap->llap_page;
+ LASSERT(page != NULL);
+
+ list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
+
+ /* Page needs/undergoing IO */
+ if (TryLockPage(page)) {
+ LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
+ continue;
+ }
+
+ keep = (llap->llap_write_queued || PageDirty(page) ||
+ PageWriteback(page) || (!PageUptodate(page) &&
+ llap->llap_origin != LLAP_ORIGIN_READAHEAD));
+
+ LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
+ keep ? "keep" : "drop",
+ llap->llap_write_queued ? "wq " : "",
+ PageDirty(page) ? "pd " : "",
+ PageUptodate(page) ? "" : "!pu ",
+ PageWriteback(page) ? "wb" : "",
+ llap->llap_defer_uptodate ? "" : "!du",
+ llap_origins[llap->llap_origin]);
+
+ /* If page is dirty or undergoing IO don't discard it */
+ if (keep) {
+ unlock_page(page);
+ continue;
+ }
+
+ page_cache_get(page);
+ spin_unlock(&sbi->ll_lock);
+
+ if (page->mapping != NULL) {
+ ll_teardown_mmaps(page->mapping,
+ (__u64)page->index << CFS_PAGE_SHIFT,
+ ((__u64)page->index << CFS_PAGE_SHIFT)|
+ ~CFS_PAGE_MASK);
+ if (!PageDirty(page) && !page_mapped(page)) {
+ ll_ra_accounting(llap, page->mapping);
+ ll_truncate_complete_page(page);
+ ++count;
+ } else {
+ LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
+ " because it is "
+ "%s\n",
+ PageDirty(page)?
+ "dirty":"mapped");
+ }
+ }
+ unlock_page(page);
+ page_cache_release(page);
+
+ spin_lock(&sbi->ll_lock);
+ }
+ list_del(&dummy_llap.llap_pglist_item);
+ spin_unlock(&sbi->ll_lock);
+
+ CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
+ count, want, total);
+
+ return count;
+}
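+
+/* The dummy_llap above acts as a list cursor; a minimal sketch of the
+ * pattern (identifiers as in the function, llite_pglist_next_llap assumed
+ * to skip entries bearing the 0xd11ad11a sentinel magic):
+ *
+ *      list_add(&dummy.llap_pglist_item, &sbi->ll_pglist);
+ *      while ((llap = llite_pglist_next_llap(sbi,
+ *                                     &dummy.llap_pglist_item)) != NULL) {
+ *              list_del_init(&dummy.llap_pglist_item);
+ *              list_add(&dummy.llap_pglist_item, &llap->llap_pglist_item);
+ *              ... process llap, possibly dropping sbi->ll_lock ...
+ *      }
+ *      list_del(&dummy.llap_pglist_item);
+ *
+ * Re-anchoring the cursor after each element is what makes it safe to drop
+ * sbi->ll_lock in the middle of the walk. */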
+
+struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
+{
+ struct ll_async_page *llap;
+ struct obd_export *exp;
struct inode *inode = page->mapping->host;
- int err;
+ struct ll_sb_info *sbi;
+ int rc;
ENTRY;
- if (!PageLocked(page))
- LBUG();
+ if (!inode) {
+ static int triggered;
- err = ll_brw(OBD_BRW_WRITE, inode, page, 1);
- if ( !err ) {
- //SetPageUptodate(page);
- set_page_clean(page);
- } else {
- CERROR("ll_brw failure %d\n", err);
+ if (!triggered) {
+ LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
+ "page received\n");
+ libcfs_debug_dumpstack(NULL);
+ triggered = 1;
+ }
+ RETURN(ERR_PTR(-EINVAL));
}
- unlock_page(page);
- RETURN(err);
+ sbi = ll_i2sbi(inode);
+ LASSERT(ll_async_page_slab);
+ LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
+
+ llap = llap_cast_private(page);
+ if (llap != NULL) {
+ /* move to end of LRU list, except when page is just about to
+ * die */
+ if (origin != LLAP_ORIGIN_REMOVEPAGE) {
+ spin_lock(&sbi->ll_lock);
+ sbi->ll_pglist_gen++;
+ list_del_init(&llap->llap_pglist_item);
+ list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
+ spin_unlock(&sbi->ll_lock);
+ }
+ GOTO(out, llap);
+ }
+
+ exp = ll_i2dtexp(page->mapping->host);
+ if (exp == NULL)
+ RETURN(ERR_PTR(-EINVAL));
+
+ /* limit the number of lustre-cached pages */
+ if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
+ llap_shrink_cache(sbi, 0);
+
+ OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
+ ll_async_page_slab_size);
+ if (llap == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
+ llap->llap_magic = LLAP_MAGIC;
+ llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
+
+ rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
+ (obd_off)page->index << CFS_PAGE_SHIFT,
+ &ll_async_page_ops, llap, &llap->llap_cookie);
+ if (rc) {
+ OBD_SLAB_FREE(llap, ll_async_page_slab,
+ ll_async_page_slab_size);
+ RETURN(ERR_PTR(rc));
+ }
+
+ CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
+ page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
+ /* also zeroing the PRIVBITS low order bitflags */
+ __set_page_ll_data(page, llap);
+ llap->llap_page = page;
+ spin_lock(&sbi->ll_lock);
+ sbi->ll_pglist_gen++;
+ sbi->ll_async_page_count++;
+ list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
+ INIT_LIST_HEAD(&llap->llap_pending_write);
+ spin_unlock(&sbi->ll_lock);
+
+ out:
+ if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
+ __u32 csum = 0;
+ csum = crc32_le(csum, kmap(page), CFS_PAGE_SIZE);
+ kunmap(page);
+ if (origin == LLAP_ORIGIN_READAHEAD ||
+ origin == LLAP_ORIGIN_READPAGE) {
+ llap->llap_checksum = 0;
+ } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
+ llap->llap_checksum == 0) {
+ llap->llap_checksum = csum;
+ CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
+ } else if (llap->llap_checksum == csum) {
+ /* origin == LLAP_ORIGIN_WRITEPAGE */
+ CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
+ page, csum);
+ } else {
+ /* origin == LLAP_ORIGIN_WRITEPAGE */
+ LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
+ "%x!\n", llap->llap_checksum, csum);
+ }
+ }
+
+ llap->llap_origin = origin;
+ RETURN(llap);
}
+static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
+ struct ll_async_page *llap,
+ unsigned to, obd_flag async_flags)
+{
+ unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
+ struct obd_io_group *oig;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
+ ENTRY;
+
+ /* _make_ready only sees llap once we've unlocked the page */
+ llap->llap_write_queued = 1;
+ rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
+ llap->llap_cookie, OBD_BRW_WRITE | noquot,
+ 0, 0, 0, async_flags);
+ if (rc == 0) {
+ LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
+ GOTO(out, 0);
+ }
-/* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated
- * too */
-static int ll_commit_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
+ llap->llap_write_queued = 0;
+ /* Do not pass llap here; this is a synchronous write. */
+ llap_write_pending(inode, NULL);
+
+ rc = oig_init(&oig);
+ if (rc)
+ GOTO(out, rc);
+
+ /* make full-page requests if we are not at EOF (bug 4410) */
+ if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
+ LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
+ "sync write before EOF: size_index %lu, to %d\n",
+ size_index, to);
+ to = CFS_PAGE_SIZE;
+ } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
+ int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
+ LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
+ "sync write at EOF: size_index %lu, to %d/%d\n",
+ size_index, to, size_to);
+ if (to < size_to)
+ to = size_to;
+ }
+
+ /* compare the checksum once before the page leaves llite */
+ if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
+ llap->llap_checksum != 0)) {
+ __u32 csum = 0;
+ struct page *page = llap->llap_page;
+ csum = crc32_le(csum, kmap(page), CFS_PAGE_SIZE);
+ kunmap(page);
+ if (llap->llap_checksum == csum) {
+ CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
+ page, csum);
+ } else {
+ CERROR("page %p old cksum %x != new cksum %x!\n",
+ page, llap->llap_checksum, csum);
+ }
+ }
+
+ rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
+ llap->llap_cookie, OBD_BRW_WRITE | noquot,
+ 0, to, 0, ASYNC_READY | ASYNC_URGENT |
+ ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
+ if (rc)
+ GOTO(free_oig, rc);
+
+ rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
+ if (rc)
+ GOTO(free_oig, rc);
+
+ rc = oig_wait(oig);
+
+ if (!rc && async_flags & ASYNC_READY) {
+ unlock_page(llap->llap_page);
+ if (PageWriteback(llap->llap_page)) {
+ end_page_writeback(llap->llap_page);
+ }
+ }
+
+ if (rc == 0 && llap_write_complete(inode, llap))
+ ll_queue_done_writing(inode, 0);
+
+ LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
+
+free_oig:
+ oig_release(oig);
+out:
+ RETURN(rc);
+}
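+
+/* Example of the bug-4410 widening above (hypothetical sizes, 4096-byte
+ * pages): with i_size = 10000 (size_index = 2), a 500-byte sync write into
+ * the page at index 1 is widened to a full CFS_PAGE_SIZE, while one into
+ * the EOF page at index 2 is widened to size_to = 10000 & ~CFS_PAGE_MASK
+ * = 1808 bytes, unless the caller already asked for more. */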
+
+/* update our write count to account for i_size increases that may have
+ * happened since we've queued the page for io. */
+
+/* be careful not to return success without setting the page Uptodate or
+ * the next pass through prepare_write will read in stale data from disk. */
+int ll_commit_write(struct file *file, struct page *page, unsigned from,
+ unsigned to)
{
- int create = 1;
struct inode *inode = page->mapping->host;
struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *md = lli->lli_smd;
- struct brw_page pg;
- int err;
+ struct lov_stripe_md *lsm = lli->lli_smd;
+ struct obd_export *exp;
+ struct ll_async_page *llap;
loff_t size;
- struct io_cb_data *cbd = ll_init_cb();
+ int rc = 0;
+ ENTRY;
- pg.pg = page;
- pg.count = to;
- pg.off = (((obd_off)page->index) << PAGE_SHIFT);
- pg.flag = create ? OBD_BRW_CREATE : 0;
+ SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
+ LASSERT(inode == file->f_dentry->d_inode);
+ LASSERT(PageLocked(page));
+
+ CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
+ inode, page, from, to, page->index);
+
+ llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
+ if (IS_ERR(llap))
+ RETURN(PTR_ERR(llap));
+
+ exp = ll_i2dtexp(inode);
+ if (exp == NULL)
+ RETURN(-EINVAL);
+
+ llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);
+
+ /*
+ * queue a write for some time in the future the first time we
+ * dirty the page.
+ *
+ * This is different from what other file systems do: they usually
+ * just mark the page (and some of its buffers) dirty and rely on
+ * balance_dirty_pages() to start a write-back. Lustre wants write-back
+ * to be started earlier for the following reasons:
+ *
+ * (1) with a large number of clients we need to limit the amount
+ * of cached data on the clients a lot;
+ *
+ * (2) large compute jobs generally want compute-only then io-only
+ * and the IO should complete as quickly as possible;
+ *
+ * (3) IO is batched up to the RPC size and is async until the
+ * client max cache is hit
+ * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
+ *
+ */
+ if (!PageDirty(page)) {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);
+
+ rc = queue_or_sync_write(exp, inode, llap, to, 0);
+ if (rc)
+ GOTO(out, rc);
+ } else {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
+ }
+
+ /* Put the page in the page cache; from now on ll_removepage is
+ * responsible for cleaning up the llap. Only set the page dirty
+ * when it is queued to be written out. */
+ if (llap->llap_write_queued)
+ set_page_dirty(page);
+
+out:
+ size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
+ ll_inode_size_lock(inode, 0);
+ if (rc == 0) {
+ lov_stripe_lock(lsm);
+ obd_adjust_kms(exp, lsm, size, 0);
+ lov_stripe_unlock(lsm);
+ if (size > i_size_read(inode))
+ i_size_write(inode, size);
+ SetPageUptodate(page);
+ } else if (size > i_size_read(inode)) {
+ /* this page is beyond the pale of i_size, so it can't be
+ * truncated in ll_p_r_e during lock revoking. we must
+ * teardown our book-keeping here. */
+ ll_removepage(page);
+ }
+ ll_inode_size_unlock(inode, 0);
+ RETURN(rc);
+}
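+
+/* Roughly, committing bytes [from, to) of the page at index 5 moves the
+ * local KMS, and i_size if it grew, up to (5 << CFS_PAGE_SHIFT) + to;
+ * obd_adjust_kms() is called with shrink = 0 here, so it only ever raises
+ * the known minimum size. */
+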
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ unsigned long ret;
ENTRY;
- if (!cbd)
- RETURN(-ENOMEM);
- SetPageUptodate(page);
+ spin_lock(&sbi->ll_lock);
+ ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
+ ra->ra_cur_pages += ret;
+ spin_unlock(&sbi->ll_lock);
- if (!PageLocked(page))
- LBUG();
+ RETURN(ret);
+}
- CDEBUG(D_INODE, "commit_page writing (at %d) to %d, count %Ld\n",
- from, to, (unsigned long long)pg.count);
+static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ spin_lock(&sbi->ll_lock);
+ LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
+ ra->ra_cur_pages, len);
+ ra->ra_cur_pages -= len;
+ spin_unlock(&sbi->ll_lock);
+}
- err = obd_brw(OBD_BRW_WRITE, ll_i2obdconn(inode), md,
- 1, &pg, ll_sync_io_cb, cbd);
- kunmap(page);
+/* called for each page in a completed rpc.*/
+int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
+{
+ struct ll_async_page *llap;
+ struct page *page;
+ int ret = 0;
+ ENTRY;
- size = pg.off + pg.count;
- /* do NOT truncate when writing in the middle of a file */
- if (size > inode->i_size)
- inode->i_size = size;
+ llap = LLAP_FROM_COOKIE(data);
+ page = llap->llap_page;
+ LASSERT(PageLocked(page));
+ LASSERT(CheckWriteback(page,cmd));
+
+ LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
+
+ if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
+ ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
+
+ if (rc == 0) {
+ if (cmd & OBD_BRW_READ) {
+ if (!llap->llap_defer_uptodate)
+ SetPageUptodate(page);
+ } else {
+ llap->llap_write_queued = 0;
+ }
+ ClearPageError(page);
+ } else {
+ if (cmd & OBD_BRW_READ) {
+ llap->llap_defer_uptodate = 0;
+ }
+ SetPageError(page);
+ if (rc == -ENOSPC)
+ set_bit(AS_ENOSPC, &page->mapping->flags);
+ else
+ set_bit(AS_EIO, &page->mapping->flags);
+ }
- RETURN(err);
-} /* ll_commit_write */
+ unlock_page(page);
-void ll_truncate(struct inode *inode)
+ if (cmd & OBD_BRW_WRITE) {
+ /* Only if rc == 0 (the write succeeded) can this page be removed
+ * from the pending write list
+ */
+ if (rc == 0 && llap_write_complete(page->mapping->host, llap))
+ ll_queue_done_writing(page->mapping->host, 0);
+ }
+
+ if (PageWriteback(page)) {
+ end_page_writeback(page);
+ }
+ page_cache_release(page);
+
+ RETURN(ret);
+}
+
+/* the kernel calls us here when a page is unhashed from the page cache.
+ * the page will be locked and the kernel is holding a spinlock, so
+ * we need to be careful. we're just tearing down our book-keeping
+ * here. */
+void ll_removepage(struct page *page)
{
- struct obdo oa = {0};
- struct lov_stripe_md *md = ll_i2info(inode)->lli_smd;
- struct lustre_handle *lockhs = NULL;
- int err;
+ struct inode *inode = page->mapping->host;
+ struct obd_export *exp;
+ struct ll_async_page *llap;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int rc;
ENTRY;
- if (!md) {
- /* object not yet allocated */
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ LASSERT(!in_interrupt());
+
+ /* sync pages or failed read pages can leave pages in the page
+ * cache that don't have our data associated with them anymore */
+ if (page_private(page) == 0) {
+ EXIT;
return;
}
- oa.o_id = md->lmd_object_id;
- oa.o_size = inode->i_size;
+ LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
- CDEBUG(D_INFO, "calling punch for %ld (all bytes after %Ld)\n",
- (long)oa.o_id, (unsigned long long)oa.o_size);
+ exp = ll_i2dtexp(inode);
+ if (exp == NULL) {
+ CERROR("page %p ind %lu gave null export\n", page, page->index);
+ EXIT;
+ return;
+ }
- err = ll_size_lock(inode, md, oa.o_size, LCK_PW, &lockhs);
- if (err) {
- CERROR("ll_size_lock failed: %d\n", err);
- /* FIXME: What to do here? It's too late to back out... */
- LBUG();
+ llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
+ if (IS_ERR(llap)) {
+ CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
+ page->index, PTR_ERR(llap));
+ EXIT;
+ return;
}
- oa.o_valid = OBD_MD_FLID;
- /* truncate == punch to/from start from/to end:
- set end to -1 for that. */
- err = obd_punch(ll_i2obdconn(inode), &oa, md, inode->i_size,
- 0xffffffffffffffff);
- if (err)
- CERROR("obd_truncate fails (%d)\n", err);
- else
- obdo_to_inode(inode, &oa, oa.o_valid);
+ if (llap_write_complete(inode, llap))
+ ll_queue_done_writing(inode, 0);
+
+ rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
+ llap->llap_cookie);
+ if (rc != 0)
+ CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
+
+ /* this unconditional free is only safe because the page lock
+ * is providing exclusivity to memory pressure/truncate/writeback. */
+ __clear_page_ll_data(page);
+
+ spin_lock(&sbi->ll_lock);
+ if (!list_empty(&llap->llap_pglist_item))
+ list_del_init(&llap->llap_pglist_item);
+ sbi->ll_pglist_gen++;
+ sbi->ll_async_page_count--;
+ spin_unlock(&sbi->ll_lock);
+ OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
+ EXIT;
+}
- err = ll_size_unlock(inode, md, LCK_PW, lockhs);
- if (err)
- CERROR("ll_size_unlock failed: %d\n", err);
+static int ll_page_matches(struct page *page, int fd_flags)
+{
+ struct lustre_handle match_lockh = {0};
+ struct inode *inode = page->mapping->host;
+ ldlm_policy_data_t page_extent;
+ int flags, matches;
+ ENTRY;
- EXIT;
- return;
-} /* ll_truncate */
+ if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
+ RETURN(1);
+
+ page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
+ page_extent.l_extent.end =
+ page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
+ flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
+ if (!(fd_flags & LL_FILE_READAHEAD))
+ flags |= LDLM_FL_CBPENDING;
+ matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
+ ll_i2info(inode)->lli_smd, LDLM_EXTENT,
+ &page_extent, LCK_PR | LCK_PW, &flags, inode,
+ &match_lockh);
+ RETURN(matches);
+}
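+
+/* e.g. for page index 3 with 4096-byte pages this matches the extent
+ * [12288, 16383] against granted PR or PW locks. Plain reads also accept
+ * locks with a cancellation pending (LDLM_FL_CBPENDING); read-ahead does
+ * not, preferring to stop early rather than use a lock that is already
+ * being called back. */
+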
-int ll_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf,
- unsigned long blocknr, int blocksize)
+static int ll_issue_page_read(struct obd_export *exp,
+ struct ll_async_page *llap,
+ struct obd_io_group *oig, int defer)
{
- obd_count bufs_per_obdo = iobuf->nr_pages;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *md = lli->lli_smd;
- struct brw_page *pga;
- int rc = 0;
- int i;
- struct io_cb_data *cbd = ll_init_cb();
+ struct page *page = llap->llap_page;
+ int rc;
+
+ page_cache_get(page);
+ llap->llap_defer_uptodate = defer;
+ llap->llap_ra_used = 0;
+ rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
+ NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
+ CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
+ ASYNC_READY | ASYNC_URGENT);
+ if (rc) {
+ LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
+ page_cache_release(page);
+ }
+ RETURN(rc);
+}
+
+static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
+{
+ LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
+ ra->ra_stats[which]++;
+}
+
+static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
+ struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
+
+ spin_lock(&sbi->ll_lock);
+ ll_ra_stats_inc_unlocked(ra, which);
+ spin_unlock(&sbi->ll_lock);
+}
+
+void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
+{
+ if (!llap->llap_defer_uptodate || llap->llap_ra_used)
+ return;
+
+ ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
+}
+
+#define RAS_CDEBUG(ras) \
+ CDEBUG(D_READA, \
+ "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
+ ras->ras_last_readpage, ras->ras_consecutive_requests, \
+ ras->ras_consecutive_pages, ras->ras_window_start, \
+ ras->ras_window_len, ras->ras_next_readahead, \
+ ras->ras_requests, ras->ras_request_index)
+
+static int index_in_window(unsigned long index, unsigned long point,
+ unsigned long before, unsigned long after)
+{
+ unsigned long start = point - before, end = point + after;
+
+ if (start > point)
+ start = 0;
+ if (end < point)
+ end = ~0;
+
+ return start <= index && index <= end;
+}
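+
+/* e.g. index_in_window(2, 5, 8, 8): start = 5 - 8 wraps below zero, so it
+ * is clamped to 0 and the window becomes [0, 13], which contains index 2;
+ * the symmetric check clamps point + after if it overflows towards ~0. */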
+
+static struct ll_readahead_state *ll_ras_get(struct file *f)
+{
+ struct ll_file_data *fd;
+
+ fd = LUSTRE_FPRIVATE(f);
+ return &fd->fd_ras;
+}
+void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
+{
+ struct ll_readahead_state *ras;
+
+ ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ ras->ras_requests++;
+ ras->ras_request_index = 0;
+ ras->ras_consecutive_requests++;
+ rar->lrr_reader = current;
+
+ list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ spin_unlock(&ras->ras_lock);
+}
+
+void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
+{
+ struct ll_readahead_state *ras;
+
+ ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ list_del_init(&rar->lrr_linkage);
+ spin_unlock(&ras->ras_lock);
+}
+
+static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
+{
+ struct ll_ra_read *scan;
+
+ list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ if (scan->lrr_reader == current)
+ return scan;
+ }
+ return NULL;
+}
+
+struct ll_ra_read *ll_ra_read_get(struct file *f)
+{
+ struct ll_readahead_state *ras;
+ struct ll_ra_read *bead;
+
+ ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ bead = ll_ra_read_get_locked(ras);
+ spin_unlock(&ras->ras_lock);
+ return bead;
+}
+
+static int ll_readahead(struct ll_readahead_state *ras,
+ struct obd_export *exp, struct address_space *mapping,
+ struct obd_io_group *oig, int flags)
+{
+ unsigned long i, start = 0, end = 0, reserved;
+ struct ll_async_page *llap;
+ struct page *page;
+ int rc, ret = 0, match_failed = 0;
+ __u64 kms;
+ unsigned int gfp_mask;
+ struct inode *inode;
+ struct lov_stripe_md *lsm;
+ struct ll_ra_read *bead;
+ struct ost_lvb lvb;
ENTRY;
- if (!cbd)
- RETURN(-ENOMEM);
- if (blocksize != PAGE_SIZE) {
- CERROR("direct_IO blocksize != PAGE_SIZE\n");
- return -EINVAL;
+ inode = mapping->host;
+ lsm = ll_i2info(inode)->lli_smd;
+
+ lov_stripe_lock(lsm);
+ inode_init_lvb(inode, &lvb);
+ obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
+ kms = lvb.lvb_size;
+ lov_stripe_unlock(lsm);
+ if (kms == 0) {
+ ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
+ RETURN(0);
}
- OBD_ALLOC(pga, sizeof(*pga) * bufs_per_obdo);
- if (!pga)
- GOTO(out, rc = -ENOMEM);
+ spin_lock(&ras->ras_lock);
+ bead = ll_ra_read_get_locked(ras);
+ /* Enlarge the RA window to encompass the full read */
+ if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
+ bead->lrr_start + bead->lrr_count) {
+ ras->ras_window_len = bead->lrr_start + bead->lrr_count -
+ ras->ras_window_start;
+ }
+ /* Reserve a part of the read-ahead window that we'll be issuing */
+ if (ras->ras_window_len) {
+ start = ras->ras_next_readahead;
+ end = ras->ras_window_start + ras->ras_window_len - 1;
+ }
+ if (end != 0) {
+ /* Truncate RA window to end of file */
+ end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
+ ras->ras_next_readahead = max(end, end + 1);
+ RAS_CDEBUG(ras);
+ }
+ spin_unlock(&ras->ras_lock);
- /* NB: we can't use iobuf->maplist[i]->index for the offset
- * instead of "blocknr" because ->index contains garbage.
- */
- for (i = 0; i < bufs_per_obdo; i++, blocknr++) {
- iobuf->maplist[i]->index = blocknr; /* XXX blksz = PAGE_SIZE? */
+ if (end == 0) {
+ ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
+ RETURN(0);
+ }
+
+ reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
+ if (reserved < end - start + 1)
+ ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
+
+ gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
+#ifdef __GFP_NOWARN
+ gfp_mask |= __GFP_NOWARN;
+#endif
- pga[i].pg = iobuf->maplist[i];
- pga[i].count = PAGE_SIZE;
- pga[i].off = (obd_off)blocknr << PAGE_SHIFT;
- pga[i].flag = OBD_BRW_CREATE;
+ for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
+ /* skip locked pages from previous readpage calls */
+ page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
+ if (page == NULL) {
+ ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
+ CDEBUG(D_READA, "g_c_p_n failed\n");
+ continue;
+ }
+
+ /* Check if page was truncated or reclaimed */
+ if (page->mapping != mapping) {
+ ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
+ CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
+ goto next_page;
+ }
+
+ /* we do this first so that we can see the page in the /proc
+ * accounting */
+ llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
+ if (IS_ERR(llap) || llap->llap_defer_uptodate)
+ goto next_page;
+
+ /* skip completed pages */
+ if (Page_Uptodate(page))
+ goto next_page;
+
+ /* bail when we hit the end of the lock. */
+ if ((rc = ll_page_matches(page, flags|LL_FILE_READAHEAD)) <= 0){
+ LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
+ "lock match failed: rc %d\n", rc);
+ ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
+ match_failed = 1;
+ goto next_page;
+ }
+
+ rc = ll_issue_page_read(exp, llap, oig, 1);
+ if (rc == 0) {
+ reserved--;
+ ret++;
+ LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
+ "started read-ahead\n");
+ } else {
+ next_page:
+ LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
+ "skipping read-ahead\n");
+
+ unlock_page(page);
+ }
+ page_cache_release(page);
}
- if (!md || !md->lmd_object_id)
- GOTO(out, rc = -ENOMEM);
+ LASSERTF((long)reserved >= 0, "reserved %lu\n", reserved);
+ if (reserved != 0)
+ ll_ra_count_put(ll_i2sbi(inode), reserved);
+ if (i == end + 1 && end == (kms >> CFS_PAGE_SHIFT))
+ ll_ra_stats_inc(mapping, RA_STAT_EOF);
+
+ /* if we didn't get to the end of the region we reserved from
+ * the ras we need to go back and update the ras so that the
+ * next read-ahead tries from where we left off. we only do so
+ * if the region we failed to issue read-ahead on is still ahead
+ * of the app and behind the next index to start read-ahead from */
+ if (i != end + 1) {
+ spin_lock(&ras->ras_lock);
+ if (i < ras->ras_next_readahead &&
+ index_in_window(i, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ ras->ras_next_readahead = i;
+ RAS_CDEBUG(ras);
+ }
+ spin_unlock(&ras->ras_lock);
+ }
- rc = obd_brw(rw == WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
- ll_i2obdconn(inode), md, bufs_per_obdo, pga,
- ll_sync_io_cb, cbd);
- if (rc == 0)
- rc = bufs_per_obdo * PAGE_SIZE;
+ RETURN(ret);
+}
-out:
- OBD_FREE(pga, sizeof(*pga) * bufs_per_obdo);
- RETURN(rc);
+static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
+{
+ ras->ras_window_start = index & (~((1024 * 1024 >> CFS_PAGE_SHIFT) - 1));
}
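+
+/* With 4096-byte pages the mask above aligns the window start down to a
+ * 1MB (256-page) boundary; e.g. ras_set_start(ras, 1000) leaves
+ * ras_window_start = 1000 & ~255 = 768. */
+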
+/* called with the ras_lock held or from places where it doesn't matter */
+static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
+{
+ ras->ras_last_readpage = index;
+ ras->ras_consecutive_requests = 0;
+ ras->ras_consecutive_pages = 0;
+ ras->ras_window_len = 0;
+ ras_set_start(ras, index);
+ ras->ras_next_readahead = max(ras->ras_window_start, index);
+
+ RAS_CDEBUG(ras);
+}
-int ll_flush_inode_pages(struct inode * inode)
+void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
- obd_count bufs_per_obdo = 0;
- obd_size *count = NULL;
- obd_off *offset = NULL;
- obd_flag *flags = NULL;
- int err = 0;
+ spin_lock_init(&ras->ras_lock);
+ ras_reset(ras, 0);
+ ras->ras_requests = 0;
+ INIT_LIST_HEAD(&ras->ras_read_beads);
+}
+static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+ struct ll_readahead_state *ras, unsigned long index,
+ unsigned hit)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ int zero = 0;
ENTRY;
- spin_lock(&pagecache_lock);
+ spin_lock(&sbi->ll_lock);
+ spin_lock(&ras->ras_lock);
+
+ ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
+
+ /* reset the read-ahead window in two cases. First when the app seeks
+ * or reads to some other part of the file. Secondly if we get a
+ * read-ahead miss that we think we've previously issued. This can
+ * be a symptom of there being so many read-ahead pages that the VM is
+ * reclaiming them before we get to them. */
+ if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
+ zero = 1;
+ ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
+ } else if (!hit && ras->ras_window_len &&
+ index < ras->ras_next_readahead &&
+ index_in_window(index, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ zero = 1;
+ ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
+ }
- spin_unlock(&pagecache_lock);
+ /* On the second access to a file smaller than the tunable
+ * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
+ * file, up to ra_max_pages. This is simply a best effort and
+ * only occurs once per open file. Normal RA behavior resumes for
+ * subsequent IO. The mmap case does not increment
+ * ras_requests and thus can never trigger this behavior. */
+ if (ras->ras_requests == 2 && !ras->ras_request_index) {
+ __u64 kms_pages;
+
+ kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
+ CFS_PAGE_SHIFT;
+
+ CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
+ ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);
+
+ if (kms_pages &&
+ kms_pages <= ra->ra_max_read_ahead_whole_pages) {
+ ras->ras_window_start = 0;
+ ras->ras_last_readpage = 0;
+ ras->ras_next_readahead = 0;
+ ras->ras_window_len = min(ra->ra_max_pages,
+ ra->ra_max_read_ahead_whole_pages);
+ GOTO(out_unlock, 0);
+ }
+ }
+ if (zero) {
+ ras_reset(ras, index);
+ GOTO(out_unlock, 0);
+ }
- OBD_ALLOC(count, sizeof(*count) * bufs_per_obdo);
- OBD_ALLOC(offset, sizeof(*offset) * bufs_per_obdo);
- OBD_ALLOC(flags, sizeof(*flags) * bufs_per_obdo);
- if (!count || !offset || !flags)
- GOTO(out, err=-ENOMEM);
+ ras->ras_last_readpage = index;
+ ras->ras_consecutive_pages++;
+ ras_set_start(ras, index);
+ ras->ras_next_readahead = max(ras->ras_window_start,
+ ras->ras_next_readahead);
+
+ /* Trigger RA in the mmap case where ras_consecutive_requests
+ * is not incremented and thus can't be used to trigger RA */
+ if (!ras->ras_window_len && ras->ras_consecutive_pages == 3) {
+ ras->ras_window_len = 1024 * 1024 >> CFS_PAGE_SHIFT;
+ GOTO(out_unlock, 0);
+ }
-#if 0
- for (i = 0 ; i < bufs_per_obdo ; i++) {
- count[i] = PAGE_SIZE;
- offset[i] = ((obd_off)(iobuf->maplist[i])->index) << PAGE_SHIFT;
- flags[i] = OBD_BRW_CREATE;
+ /* The initial ras_window_len is set to the request size. To avoid
+ * uselessly reading and discarding pages for random IO the window is
+ * only increased once per consecutive request received. */
+ if (ras->ras_consecutive_requests > 1 && !ras->ras_request_index) {
+ ras->ras_window_len = min(ras->ras_window_len +
+ (1024 * 1024 >> CFS_PAGE_SHIFT),
+ ra->ra_max_pages);
}
- err = obd_brw(OBD_BRW_WRITE, ll_i2obdconn(inode),
- ll_i2info(inode)->lli_smd, bufs_per_obdo,
- iobuf->maplist, count, offset, flags, NULL, NULL);
- if (err == 0)
- err = bufs_per_obdo * 4096;
-#endif
- out:
- OBD_FREE(flags, sizeof(*flags) * bufs_per_obdo);
- OBD_FREE(count, sizeof(*count) * bufs_per_obdo);
- OBD_FREE(offset, sizeof(*offset) * bufs_per_obdo);
- RETURN(err);
+ EXIT;
+out_unlock:
+ RAS_CDEBUG(ras);
+ ras->ras_request_index++;
+ spin_unlock(&ras->ras_lock);
+ spin_unlock(&sbi->ll_lock);
+ return;
}
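+
+/* Putting ras_update() together (assuming 4096-byte pages): a strictly
+ * sequential reader grows ras_window_len by 1MB (256 pages) once per
+ * consecutive request, capped at ra_max_pages, while a single read more
+ * than 8 pages away from ras_last_readpage throws the state away via
+ * ras_reset(). */
+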
+int ll_writepage(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_export *exp;
+ struct ll_async_page *llap;
+ int rc = 0;
+ ENTRY;
+ LASSERT(!PageDirty(page));
+ LASSERT(PageLocked(page));
-struct address_space_operations ll_aops = {
- readpage: ll_readpage,
- writepage: ll_writepage,
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,17))
- direct_IO: ll_direct_IO,
-#endif
- sync_page: block_sync_page,
- prepare_write: ll_prepare_write,
- commit_write: ll_commit_write,
- bmap: NULL
-};
+ exp = ll_i2dtexp(inode);
+ if (exp == NULL)
+ GOTO(out, rc = -EINVAL);
+
+ llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
+ if (IS_ERR(llap))
+ GOTO(out, rc = PTR_ERR(llap));
+
+ LASSERT(!PageWriteback(page));
+ set_page_writeback(page);
+
+ page_cache_get(page);
+ if (llap->llap_write_queued) {
+ LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
+ rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
+ llap->llap_cookie,
+ ASYNC_READY | ASYNC_URGENT);
+ } else {
+ rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
+ ASYNC_READY | ASYNC_URGENT);
+ }
+ if (rc)
+ page_cache_release(page);
+out:
+ if (rc) {
+ if (!lli->lli_async_rc)
+ lli->lli_async_rc = rc;
+ /* re-dirty page on error so it retries write */
+ if (PageWriteback(page)) {
+ end_page_writeback(page);
+ }
+ /* resend (re-dirty) the page only if its IO has not yet started */
+ if (!PageError(page))
+ ll_redirty_page(page);
+ unlock_page(page);
+ }
+ RETURN(rc);
+}
+
+/*
+ * for now we do our readpage the same on both 2.4 and 2.6. The kernel's
+ * read-ahead assumes it is valid to issue readpage all the way up to
+ * i_size, but our dlm locks make that not the case. We disable the
+ * kernel's read-ahead and do our own by walking ahead in the page cache
+ * checking for dlm lock coverage. the main difference between 2.4 and
+ * 2.6 is how read-ahead gets batched and issued, but we're using our own,
+ * so they look the same.
+ */
+int ll_readpage(struct file *filp, struct page *page)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
+ struct inode *inode = page->mapping->host;
+ struct obd_export *exp;
+ struct ll_async_page *llap;
+ struct obd_io_group *oig = NULL;
+ int rc;
+ ENTRY;
+
+ LASSERT(PageLocked(page));
+ LASSERT(!PageUptodate(page));
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
+ inode->i_ino, inode->i_generation, inode,
+ (((loff_t)page->index) << CFS_PAGE_SHIFT),
+ (((loff_t)page->index) << CFS_PAGE_SHIFT));
+ LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
+
+ if (!ll_i2info(inode)->lli_smd) {
+ /* File with no objects - one big hole */
+ /* We use this only because remove_from_page_cache() is not
+ * exported; we make the page up to date again ourselves below. */
+ ll_truncate_complete_page(page);
+ clear_page(kmap(page));
+ kunmap(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ RETURN(0);
+ }
+
+ rc = oig_init(&oig);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ exp = ll_i2dtexp(inode);
+ if (exp == NULL)
+ GOTO(out, rc = -EINVAL);
+
+ llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
+ if (IS_ERR(llap))
+ GOTO(out, rc = PTR_ERR(llap));
+
+ if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
+ ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
+ llap->llap_defer_uptodate);
+
+ if (llap->llap_defer_uptodate) {
+ llap->llap_ra_used = 1;
+ rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
+ fd->fd_flags);
+ if (rc > 0)
+ obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
+ NULL, oig);
+ LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
+ SetPageUptodate(page);
+ unlock_page(page);
+ GOTO(out_oig, rc = 0);
+ }
+
+ if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
+ rc = ll_page_matches(page, fd->fd_flags);
+ if (rc < 0) {
+ LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ if (rc == 0) {
+ CWARN("ino %lu page %lu (%llu) not covered by "
+ "a lock (mmap?). check debug logs.\n",
+ inode->i_ino, page->index,
+ (long long)page->index << CFS_PAGE_SHIFT);
+ }
+ }
+
+ rc = ll_issue_page_read(exp, llap, oig, 0);
+ if (rc)
+ GOTO(out, rc);
+
+ LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
+ if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
+ ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
+ fd->fd_flags);
+
+ rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
+
+out:
+ if (rc)
+ unlock_page(page);
+out_oig:
+ if (oig != NULL)
+ oig_release(oig);
+ RETURN(rc);
+}