/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Lustre Lite I/O page cache routines shared by different kernel revs
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * This file is part of Lustre, http://www.lustre.org.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * lustre/llite/rw.c
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Lustre Lite I/O page cache routines shared by different kernel revs
*/
+
#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/mm.h>
return rc;
}
+/**
+ * Make a page ready for an ASYNC write.
+ * \param data - pointer to llap cookie
+ * \param cmd - one of the OBD_BRW_* flags
+ *
+ * \retval 0 if the page was successfully prepared for sending
+ * \retval -EAGAIN if the page does not need to be sent
+ */
static int ll_ap_make_ready(void *data, int cmd)
{
struct ll_async_page *llap;
llap = LLAP_FROM_COOKIE(data);
page = llap->llap_page;
- LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n", cmd, page,
- page->mapping->host->i_ino, page->index);
-
/* we're trying to write, but the page is locked.. come back later */
if (TryLockPage(page))
RETURN(-EAGAIN);
- LASSERT(!PageWriteback(page));
+ LASSERTF(!(cmd & OBD_BRW_READ) || !PageWriteback(page),
+ "cmd %x page %p ino %lu index %lu fl %lx\n", cmd, page,
+ page->mapping->host->i_ino, page->index, page->flags);
/* if we left PageDirty we might get another writepage call
* in the future. list walkers are bright enough
* cli lock */
LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
page->mapping->host->i_ino, page->index);
- clear_page_dirty_for_io(page);
+ if (!clear_page_dirty_for_io(page)) {
+ unlock_page(page);
+ RETURN(-EAGAIN);
+ }
/* This actually clears the dirty bit in the radix tree.*/
set_page_writeback(page);
return llap;
}
-/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
+/* Try to reap @target pages from the specified @cpu's async page list.
*
* There is an llap attached onto every page in lustre, linked off @sbi.
* We add an llap to the list so we don't lose our place during list walking.
* If llaps in the list are being moved they will only move to the end
* of the LRU, and we aren't terribly interested in those pages here (we
- * start at the beginning of the list where the least-used llaps are.
- */
-int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
+ * start at the beginning of the list where the least-used llaps are). */
+static inline int llap_shrink_cache_internal(struct ll_sb_info *sbi,
+ int cpu, int target)
{
struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
- unsigned long total, want, count = 0;
-
- total = sbi->ll_async_page_count;
-
- /* There can be a large number of llaps (600k or more in a large
- * memory machine) so the VM 1/6 shrink ratio is likely too much.
- * Since we are freeing pages also, we don't necessarily want to
- * shrink so much. Limit to 40MB of pages + llaps per call. */
- if (shrink_fraction == 0)
- want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
- else
- want = (total + shrink_fraction - 1) / shrink_fraction;
-
- if (want > 40 << (20 - CFS_PAGE_SHIFT))
- want = 40 << (20 - CFS_PAGE_SHIFT);
-
- CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
- want, total, shrink_fraction);
-
- spin_lock(&sbi->ll_lock);
- list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
-
- while (--total >= 0 && count < want) {
+ struct ll_pglist_data *pd;
+ struct list_head *head;
+ int count = 0;
+
+ pd = ll_pglist_cpu_lock(sbi, cpu);
+ head = &pd->llpd_list;
+ list_add(&dummy_llap.llap_pglist_item, head);
+ while (count < target) {
struct page *page;
int keep;
if (unlikely(need_resched())) {
- spin_unlock(&sbi->ll_lock);
+ ll_pglist_cpu_unlock(sbi, cpu);
cond_resched();
- spin_lock(&sbi->ll_lock);
+ ll_pglist_cpu_lock(sbi, cpu);
}
- llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
+ llap = llite_pglist_next_llap(head,
+ &dummy_llap.llap_pglist_item);
list_del_init(&dummy_llap.llap_pglist_item);
if (llap == NULL)
break;
}
page_cache_get(page);
- spin_unlock(&sbi->ll_lock);
+ ll_pglist_cpu_unlock(sbi, cpu);
if (page->mapping != NULL) {
ll_teardown_mmaps(page->mapping,
unlock_page(page);
page_cache_release(page);
- spin_lock(&sbi->ll_lock);
+ ll_pglist_cpu_lock(sbi, cpu);
}
list_del(&dummy_llap.llap_pglist_item);
- spin_unlock(&sbi->ll_lock);
+ ll_pglist_cpu_unlock(sbi, cpu);
+
+ CDEBUG(D_CACHE, "shrank %d pages, expected %d\n", count, target);
+ return count;
+}
+
+
+/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
+ *
+ * First the total number of pages wanted is derived from @shrink_fraction,
+ * then the number of pages to reap from each cpu is chosen in proportion to
+ * that cpu's own page count (llpd_count).
+ */
+int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
+{
+ unsigned long total, want, percpu_want, count = 0;
+ int cpu, nr_cpus;
+
+ total = lcounter_read(&sbi->ll_async_page_count);
+ if (total == 0)
+ return 0;
+
+#ifdef HAVE_SHRINKER_CACHE
+ want = shrink_fraction;
+ if (want == 0)
+ return total;
+#else
+ /* There can be a large number of llaps (600k or more in a large
+ * memory machine) so the VM 1/6 shrink ratio is likely too much.
+ * Since we are freeing pages also, we don't necessarily want to
+ * shrink so much. Limit to 40MB of pages + llaps per call. */
+ if (shrink_fraction <= 0)
+ want = total - sbi->ll_async_page_max + 32*num_online_cpus();
+ else
+ want = (total + shrink_fraction - 1) / shrink_fraction;
+#endif
+
+ if (want > 40 << (20 - CFS_PAGE_SHIFT))
+ want = 40 << (20 - CFS_PAGE_SHIFT);
+
+ CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
+ want, total, shrink_fraction);
+
+ nr_cpus = num_possible_cpus();
+ cpu = sbi->ll_async_page_clock_hand;
+ /* we do at most one round */
+ do {
+ int c;
+
+ cpu = (cpu + 1) % nr_cpus;
+ c = LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_count;
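+ /* an offline cpu receives no new pages, so reap its whole list;
+ * an online cpu gives up a share of @want roughly proportional
+ * to its own page count relative to the total. */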
+ if (!cpu_online(cpu))
+ percpu_want = c;
+ else
+ percpu_want = want / ((total / (c + 1)) + 1);
+ if (percpu_want == 0)
+ continue;
+
+ count += llap_shrink_cache_internal(sbi, cpu, percpu_want);
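+ /* once enough pages are reaped, record our position; this also
+ * ends the sweep, since cpu now equals the clock hand. */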
+ if (count >= want)
+ sbi->ll_async_page_clock_hand = cpu;
+ } while (cpu != sbi->ll_async_page_clock_hand);
CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
count, want, total);
+#ifdef HAVE_SHRINKER_CACHE
+ return lcounter_read(&sbi->ll_async_page_count);
+#else
return count;
+#endif
}
-struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
+/* Rebalance the async page queue length for each cpu. The intent is that a
+ * cpu doing more IO gets a relatively longer queue.
+ * This function should be called with preemption disabled.
+ */
+static inline int llap_async_cache_rebalance(struct ll_sb_info *sbi)
+{
+ unsigned long sample = 0, *cpu_sample, bias, slice;
+ struct ll_pglist_data *pd;
+ cpumask_t mask;
+ int cpu, surplus;
+ int w1 = 7, w2 = 3, base = (w1 + w2); /* weight value */
+ atomic_t *pcnt;
+
+ if (!spin_trylock(&sbi->ll_async_page_reblnc_lock)) {
+ /* someone else is doing the job */
+ return 1;
+ }
+
+ pcnt = &LL_PGLIST_DATA(sbi)->llpd_sample_count;
+ if (!atomic_read(pcnt)) {
+ /* rare case, somebody else has gotten this job done */
+ spin_unlock(&sbi->ll_async_page_reblnc_lock);
+ return 1;
+ }
+
+ sbi->ll_async_page_reblnc_count++;
+ cpu_sample = sbi->ll_async_page_sample;
+ memset(cpu_sample, 0, num_possible_cpus() * sizeof(unsigned long));
+ for_each_online_cpu(cpu) {
+ pcnt = &LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_sample_count;
+ cpu_sample[cpu] = atomic_read(pcnt);
+ atomic_set(pcnt, 0);
+ sample += cpu_sample[cpu];
+ }
+
+ cpus_clear(mask);
+ surplus = sbi->ll_async_page_max;
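+ /* slice: budget pages granted per sampled hit; sample becomes the
+ * per-cpu average below, and bias (1/16 of that average) is the
+ * tolerance before a cpu's budget is adjusted. */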
+ slice = surplus / sample + 1;
+ sample /= num_online_cpus();
+ bias = sample >> 4;
+ for_each_online_cpu(cpu) {
+ pd = LL_PGLIST_DATA_CPU(sbi, cpu);
+ if (labs((long int)sample - cpu_sample[cpu]) > bias) {
+ unsigned long budget = pd->llpd_budget;
+ /* weighted original queue length and expected queue
+ * length to avoid thrashing. */
+ pd->llpd_budget = (budget * w1) / base +
+ (slice * cpu_sample[cpu]) * w2 / base;
+ cpu_set(cpu, mask);
+ }
+ surplus -= pd->llpd_budget;
+ }
+ surplus /= cpus_weight(mask) ?: 1;
+ for_each_cpu_mask(cpu, mask)
+ LL_PGLIST_DATA_CPU(sbi, cpu)->llpd_budget += surplus;
+ spin_unlock(&sbi->ll_async_page_reblnc_lock);
+
+ /* TODO: do we really need to call llap_shrink_cache_internal
+ * for every cpu whose page count is greater than its budget?
+ * for_each_cpu_mask(cpu, mask)
+ * llap_shrink_cache_internal(...)
+ */
+
+ return 0;
+}
+
+static struct ll_async_page *llap_from_page_with_lockh(struct page *page,
+ unsigned origin,
+ struct lustre_handle *lockh)
{
struct ll_async_page *llap;
struct obd_export *exp;
struct inode *inode = page->mapping->host;
struct ll_sb_info *sbi;
- int rc;
+ struct ll_pglist_data *pd;
+ int rc, cpu, target;
ENTRY;
if (!inode) {
/* move to end of LRU list, except when page is just about to
* die */
if (origin != LLAP_ORIGIN_REMOVEPAGE) {
- spin_lock(&sbi->ll_lock);
- sbi->ll_pglist_gen++;
- list_del_init(&llap->llap_pglist_item);
- list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
- spin_unlock(&sbi->ll_lock);
+ int old_cpu = llap->llap_pglist_cpu;
+ struct ll_pglist_data *old_pd;
+
+ pd = ll_pglist_double_lock(sbi, old_cpu, &old_pd);
+ pd->llpd_hit++;
+ while (old_cpu != llap->llap_pglist_cpu) {
+ /* rare case: someone else is touching this
+ * page too. */
+ ll_pglist_double_unlock(sbi, old_cpu);
+ old_cpu = llap->llap_pglist_cpu;
+ pd=ll_pglist_double_lock(sbi, old_cpu, &old_pd);
+ }
+
+ list_move(&llap->llap_pglist_item,
+ &pd->llpd_list);
+ old_pd->llpd_gen++;
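+ /* the llap moved to a different cpu's list, so transfer
+ * the accounting from the old per-cpu data to the new one. */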
+ if (pd->llpd_cpu != old_cpu) {
+ pd->llpd_count++;
+ old_pd->llpd_count--;
+ old_pd->llpd_gen++;
+ llap->llap_pglist_cpu = pd->llpd_cpu;
+ pd->llpd_cross++;
+ }
+ ll_pglist_double_unlock(sbi, old_cpu);
}
GOTO(out, llap);
}
RETURN(ERR_PTR(-EINVAL));
/* limit the number of lustre-cached pages */
- if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
- llap_shrink_cache(sbi, 0);
+ cpu = get_cpu();
+ pd = LL_PGLIST_DATA(sbi);
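+ /* a positive target means this cpu's list already exceeds
+ * its budget and some pages should be reaped. */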
+ target = pd->llpd_count - pd->llpd_budget;
+ if (target > 0) {
+ rc = 0;
+ atomic_inc(&pd->llpd_sample_count);
+ if (atomic_read(&pd->llpd_sample_count) >
+ sbi->ll_async_page_sample_max) {
+ pd->llpd_reblnc_count++;
+ rc = llap_async_cache_rebalance(sbi);
+ if (rc == 0)
+ target = pd->llpd_count - pd->llpd_budget;
+ }
+ /* if rc equals 1, another cpu is doing the rebalance
+ * job, and our budget could be modified while we read it.
+ * Furthermore, it is most likely being increased, because
+ * we have already reached the rebalance threshold. In this
+ * case, skip shrinking the cache here. */
+ if ((rc == 0) && target > 0)
+ llap_shrink_cache_internal(sbi, cpu, target + 32);
+ }
+ put_cpu();
OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
ll_async_page_slab_size);
llap->llap_magic = LLAP_MAGIC;
llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
+ /* XXX: for bug 11270 - check for lockless origin here! */
+ if (origin == LLAP_ORIGIN_LOCKLESS_IO)
+ llap->llap_nocache = 1;
+
rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
(obd_off)page->index << CFS_PAGE_SHIFT,
- &ll_async_page_ops, llap, &llap->llap_cookie);
+ &ll_async_page_ops, llap, &llap->llap_cookie,
+ llap->llap_nocache, lockh);
if (rc) {
OBD_SLAB_FREE(llap, ll_async_page_slab,
ll_async_page_slab_size);
/* also zeroing the PRIVBITS low order bitflags */
__set_page_ll_data(page, llap);
llap->llap_page = page;
- spin_lock(&sbi->ll_lock);
- sbi->ll_pglist_gen++;
- sbi->ll_async_page_count++;
- list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
+
+ lcounter_inc(&sbi->ll_async_page_count);
+ pd = ll_pglist_lock(sbi);
+ list_add_tail(&llap->llap_pglist_item, &pd->llpd_list);
INIT_LIST_HEAD(&llap->llap_pending_write);
- spin_unlock(&sbi->ll_lock);
+ pd->llpd_count++;
+ pd->llpd_gen++;
+ pd->llpd_miss++;
+ llap->llap_pglist_cpu = pd->llpd_cpu;
+ ll_pglist_unlock(sbi);
out:
if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
RETURN(llap);
}
+struct ll_async_page *llap_from_page(struct page *page,
+ unsigned origin)
+{
+ return llap_from_page_with_lockh(page, origin, NULL);
+}
+
static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
struct ll_async_page *llap,
unsigned to, obd_flag async_flags)
if (!rc && async_flags & ASYNC_READY) {
unlock_page(llap->llap_page);
- if (PageWriteback(llap->llap_page)) {
+ if (PageWriteback(llap->llap_page))
end_page_writeback(llap->llap_page);
- }
}
if (rc == 0 && llap_write_complete(inode, llap))
int ll_commit_write(struct file *file, struct page *page, unsigned from,
unsigned to)
{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct inode *inode = page->mapping->host;
struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm = lli->lli_smd;
struct obd_export *exp;
struct ll_async_page *llap;
loff_t size;
+ struct lustre_handle *lockh = NULL;
int rc = 0;
ENTRY;
CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
inode, page, from, to, page->index);
- llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ lockh = &fd->fd_cwlockh;
+
+ llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_COMMIT_WRITE, lockh);
if (IS_ERR(llap))
RETURN(PTR_ERR(llap));
RETURN(rc);
}
+static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
+
+/* WARNING: This algorithm is used to reduce the contention on
+ * sbi->ll_lock. It should work well if ra_max_pages is much
+ * greater than a single file's read-ahead window.
+ *
+ * TODO: There may exist a `global sync problem' in this implementation.
+ * Suppose the global ra window is 100M and each file's ra window is 10M:
+ * if over 10 files try to get their ra budget and reach
+ * ll_ra_count_get at exactly the same time, all of them will get a zero ra
+ * window, even though the global window is 100M. -jay
+ */
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
unsigned long ret;
ENTRY;
- spin_lock(&sbi->ll_lock);
- ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
- ra->ra_cur_pages += ret;
- spin_unlock(&sbi->ll_lock);
+ ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), len);
+ if ((int)ret < 0)
+ GOTO(out, ret = 0);
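+ /* speculatively reserve the pages; if a racing reservation pushed
+ * us past ra_max_pages, back the reservation out and return 0. */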
+ if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+ atomic_sub(ret, &ra->ra_cur_pages);
+ ret = 0;
+ }
+out:
RETURN(ret);
}
static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
- spin_lock(&sbi->ll_lock);
- LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
- ra->ra_cur_pages, len);
- ra->ra_cur_pages -= len;
- spin_unlock(&sbi->ll_lock);
+ atomic_sub(len, &ra->ra_cur_pages);
}
/* called for each page in a completed rpc.*/
set_bit(AS_EIO, &page->mapping->flags);
}
+ /* Be careful about clearing WB.
+ * If WB is cleared after the page lock is released, parallel IO can
+ * start before ap_make_ready has finished, so we could end up with a
+ * page that has PG_Writeback set from ->writepage() and a completed
+ * READ that clears this flag. */
+ if ((cmd & OBD_BRW_WRITE) && PageWriteback(page))
+ end_page_writeback(page);
+
unlock_page(page);
if (cmd & OBD_BRW_WRITE) {
ll_queue_done_writing(page->mapping->host, 0);
}
- if (PageWriteback(page)) {
- end_page_writeback(page);
- }
page_cache_release(page);
RETURN(ret);
struct obd_export *exp;
struct ll_async_page *llap;
struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc;
+ struct ll_pglist_data *pd;
+ int rc, cpu;
ENTRY;
exp = ll_i2dtexp(inode);
* is providing exclusivity to memory pressure/truncate/writeback..*/
__clear_page_ll_data(page);
- spin_lock(&sbi->ll_lock);
+ lcounter_dec(&sbi->ll_async_page_count);
+ cpu = llap->llap_pglist_cpu;
+ pd = ll_pglist_cpu_lock(sbi, cpu);
+ pd->llpd_gen++;
+ pd->llpd_count--;
if (!list_empty(&llap->llap_pglist_item))
list_del_init(&llap->llap_pglist_item);
- sbi->ll_pglist_gen++;
- sbi->ll_async_page_count--;
- spin_unlock(&sbi->ll_lock);
+ ll_pglist_cpu_unlock(sbi, cpu);
OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
EXIT;
}
* here. */
void ll_removepage(struct page *page)
{
+ struct ll_async_page *llap = llap_cast_private(page);
ENTRY;
LASSERT(!in_interrupt());
return;
}
- LASSERT(!llap_cast_private(page)->llap_lockless_io_page);
+ LASSERT(!llap->llap_lockless_io_page);
+ LASSERT(!llap->llap_nocache);
LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
__ll_put_llap(page);
EXIT;
}
-static int ll_page_matches(struct page *page, int fd_flags)
-{
- struct lustre_handle match_lockh = {0};
- struct inode *inode = page->mapping->host;
- ldlm_policy_data_t page_extent;
- int flags, matches;
- ENTRY;
-
- if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
- RETURN(1);
-
- page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
- page_extent.l_extent.end =
- page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
- flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
- if (!(fd_flags & LL_FILE_READAHEAD))
- flags |= LDLM_FL_CBPENDING;
- matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
- ll_i2info(inode)->lli_smd, LDLM_EXTENT,
- &page_extent, LCK_PR | LCK_PW, &flags, inode,
- &match_lockh);
- RETURN(matches);
-}
-
static int ll_issue_page_read(struct obd_export *exp,
struct ll_async_page *llap,
struct obd_io_group *oig, int defer)
RETURN(rc);
}
-static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
+static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
{
LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
- ra->ra_stats[which]++;
+ lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
- struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
-
- spin_lock(&sbi->ll_lock);
- ll_ra_stats_inc_unlocked(ra, which);
- spin_unlock(&sbi->ll_lock);
+ ll_ra_stats_inc_sbi(sbi, which);
}
void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
unsigned long off, unsigned length)
{
unsigned long cont_len = st_off > off ? st_off - off : 0;
- unsigned long stride_len = length + off > st_off ?
+ __u64 stride_len = length + off > st_off ?
length + off + 1 - st_off : 0;
unsigned long left, pg_count;
ras->ras_next_readahead = max(end, end + 1);
RAS_CDEBUG(ras);
}
- ria.ria_start = start;
- ria.ria_end = end;
- /* If stride I/O mode is detected, get stride window*/
- if (stride_io_mode(ras)) {
- ria.ria_length = ras->ras_stride_length;
- ria.ria_pages = ras->ras_stride_pages;
- }
+ ria.ria_start = start;
+ ria.ria_end = end;
+ /* If stride I/O mode is detected, get stride window*/
+ if (stride_io_mode(ras)) {
+ ria.ria_stoff = ras->ras_stride_offset;
+ ria.ria_length = ras->ras_stride_length;
+ ria.ria_pages = ras->ras_stride_pages;
+ }
spin_unlock(&ras->ras_lock);
if (end == 0) {
reserved = ll_ra_count_get(ll_i2sbi(inode), len);
- if (reserved < end - start + 1)
+ if (reserved < len)
ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
CDEBUG(D_READA, "reserved page %lu \n", reserved);
int zero = 0, stride_zero = 0, stride_detect = 0, ra_miss = 0;
ENTRY;
- spin_lock(&sbi->ll_lock);
spin_lock(&ras->ras_lock);
- ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
+ ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
/* reset the read-ahead window in two cases. First when the app seeks
* or reads to some other part of the file. Secondly if we get a
* reclaiming it before we get to it. */
if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
zero = 1;
- ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
/* check whether it is in stride I/O mode*/
if (!index_in_stride_window(index, ras, inode))
stride_zero = 1;
* stride I/O mode to avoid complication */
if (!stride_io_mode(ras))
stride_zero = 1;
- ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
}
/* On the second access to a file smaller than the tunable
RAS_CDEBUG(ras);
ras->ras_request_index++;
spin_unlock(&ras->ras_lock);
- spin_unlock(&sbi->ll_lock);
return;
}
if (IS_ERR(llap))
GOTO(out, rc = PTR_ERR(llap));
+ LASSERT(!llap->llap_nocache);
LASSERT(!PageWriteback(page));
set_page_writeback(page);
rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
ASYNC_READY | ASYNC_URGENT);
}
- if (rc)
- page_cache_release(page);
-out:
if (rc) {
- if (!lli->lli_async_rc)
- lli->lli_async_rc = rc;
/* re-dirty page on error so it retries write */
- if (PageWriteback(page)) {
+ if (PageWriteback(page))
end_page_writeback(page);
- }
+
+ /* only resend pages whose IO has not started */
if (!PageError(page))
ll_redirty_page(page);
+
+ page_cache_release(page);
+ }
+out:
+ if (rc) {
+ if (!lli->lli_async_rc)
+ lli->lli_async_rc = rc;
unlock_page(page);
}
RETURN(rc);
struct obd_export *exp;
struct ll_async_page *llap;
struct obd_io_group *oig = NULL;
+ struct lustre_handle *lockh = NULL;
int rc;
ENTRY;
if (exp == NULL)
GOTO(out, rc = -EINVAL);
- llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
- if (IS_ERR(llap))
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ lockh = &fd->fd_cwlockh;
+
+ llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_READPAGE, lockh);
+ if (IS_ERR(llap)) {
+ if (PTR_ERR(llap) == -ENOLCK) {
+ CWARN("ino %lu page %lu (%llu) not covered by "
+ "a lock (mmap?). check debug logs.\n",
+ inode->i_ino, page->index,
+ (long long)page->index << CFS_PAGE_SHIFT);
+ }
GOTO(out, rc = PTR_ERR(llap));
+ }
if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
GOTO(out_oig, rc = 0);
}
- if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
- rc = ll_page_matches(page, fd->fd_flags);
- if (rc < 0) {
- LL_CDEBUG_PAGE(D_ERROR, page,
- "lock match failed: rc %d\n", rc);
- GOTO(out, rc);
- }
-
- if (rc == 0) {
- CWARN("ino %lu page %lu (%llu) not covered by "
- "a lock (mmap?). check debug logs.\n",
- inode->i_ino, page->index,
- (long long)page->index << CFS_PAGE_SHIFT);
- }
- }
-
rc = ll_issue_page_read(exp, llap, oig, 0);
if (rc)
GOTO(out, rc);
LL_CDEBUG_PAGE(D_PAGE, pages[i], "offset "LPU64","
" from %u, bytes = %u\n",
- pos, from, bytes);
+ (__u64)pos, from, bytes);
LASSERTF(pos >> CFS_PAGE_SHIFT == pages[i]->index,
"wrong page index %lu (%lu)\n",
pages[i]->index,
rc = generic_write_checks(file, ppos, &count, 0);
if (rc)
GOTO(out, rc);
- rc = remove_suid(file->f_dentry);
+ rc = ll_remove_suid(file->f_dentry, file->f_vfsmnt);
if (rc)
GOTO(out, rc);
}