/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 * this started as an implementation of an io daemon that woke regularly
 * to force writeback.. the throttling in prepare_write and kupdate's usual
 * writeback pressure got rid of our thread, but the file name remains.
 */

#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/kmod.h>
#include <linux/pagemap.h>

#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include "llite_internal.h"
/* PG_inactive_clean is shorthand for rmap, we want free_high/low here.. */
#ifdef PG_inactive_clean
#include <linux/mm_inline.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/lustre_lite.h>
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif
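
/*
 * Illustrative use (mirrors ll_get_dirty_pages() below): walking a page
 * list tail-first while entries are being moved to another list.  The
 * "_safe" cursor 'n' is read before 'pos' can be spliced away, so the
 * walk survives the list_del()/list_add():
 *
 *      struct list_head *pos, *n;
 *      list_for_each_prev_safe(pos, n, &mapping->dirty_pages) {
 *              struct page *page = list_entry(pos, struct page, list);
 *              list_del(&page->list);
 *              list_add(&page->list, &mapping->locked_pages);
 *      }
 */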
extern spinlock_t inode_lock;

struct ll_writeback_pages {
        unsigned int npgs, max; /* pages gathered so far / capacity of pga */
        struct brw_page *pga;   /* one brw descriptor per dirty page */
};
/*
 * check to see if we're racing with truncate and put the page in
 * the brw_page array.  returns 0 if there is more room and 1
 * if the array is full.
 */
static int llwp_consume_page(struct ll_writeback_pages *llwp,
                             struct inode *inode, struct page *page)
{
        obd_off off = ((obd_off)page->index) << PAGE_SHIFT;
        struct brw_page *pg;

        /* we raced with truncate? */
        if ( off >= inode->i_size ) {
                int rc;

                rc = ll_clear_dirty_pages(ll_i2obdconn(inode),
                                          ll_i2info(inode)->lli_smd,
                                          page->index, page->index);
                LASSERT(rc == 0);
                CDEBUG(D_CACHE, "offset "LPU64" (index %lu) > i_size %llu\n",
                       off, page->index, inode->i_size);
                unlock_page(page);
                return 0;
        }

        page_cache_get(page);
        pg = &llwp->pga[llwp->npgs];
        llwp->npgs++;
        LASSERT(llwp->npgs <= llwp->max);

        pg->pg = page;
        pg->off = off;
        pg->flag = OBD_BRW_CREATE|OBD_BRW_FROM_GRANT;
        pg->count = PAGE_CACHE_SIZE;

        /* catch partial writes for files that end mid-page */
        if (pg->off + pg->count > inode->i_size)
                pg->count = inode->i_size & ~PAGE_CACHE_MASK;
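        /*
         * worked example: with PAGE_CACHE_SIZE == 4096 and i_size == 5000,
         * the page at index 1 covers bytes [4096, 5000), so count becomes
         * 5000 & ~PAGE_CACHE_MASK == 904 rather than a full page.
         */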
        /*
         * matches ptlrpc_bulk_get assert that trickles down
         * from a 0 page length going through niobuf and into
         * the buffer regions being posted
         */
        LASSERT(pg->count >= 0);

        CDEBUG(D_CACHE, "brw_page %p: off "LPU64" cnt %d, page %p: ind %ld"
               " i_size: %llu\n", pg, pg->off, pg->count, page,
               page->index, inode->i_size);

        return llwp->npgs == llwp->max;
}
/*
 * gather dirty pages from the mapping into llwp->pga; llwp->npgs records
 * how many pages were added.
 *
 * this duplicates filemap_fdatasync and gives us an opportunity to grab
 * lots of dirty pages at once
 */
static void ll_get_dirty_pages(struct inode *inode,
                               struct ll_writeback_pages *llwp)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct list_head *pos, *n;

        PGCACHE_WRLOCK(mapping);

        list_for_each_prev_safe(pos, n, &mapping->dirty_pages) {
                page = list_entry(pos, struct page, list);
                if (TryLockPage(page))
                        continue;

                list_del(&page->list);
                list_add(&page->list, &mapping->locked_pages);

                if ( ! PageDirty(page) ) {
                        unlock_page(page);
                        continue;
                }
                ClearPageDirty(page);

                if ( llwp_consume_page(llwp, inode, page) != 0)
                        break;
        }

        PGCACHE_WRUNLOCK(mapping);
}
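
/*
 * push the pages collected in llwp to the OSTs as one async bulk write and
 * wait for it to complete; whether it succeeds or fails, each page's dirty
 * accounting is cleared via ll_clear_dirty_pages(), then the page is
 * unlocked and released.
 */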
static void ll_writeback(struct inode *inode, struct ll_writeback_pages *llwp)
{
        struct ptlrpc_request_set *set;
        int rc, i;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),bytes=%u\n",
               inode->i_ino, inode->i_generation, inode,
               ((llwp->npgs-1) << PAGE_SHIFT) + llwp->pga[llwp->npgs-1].count);

        set = ptlrpc_prep_set();
        if (set == NULL) {
                CERROR ("Can't create request set\n");
                rc = -ENOMEM;
        } else {
                rc = obd_brw_async(OBD_BRW_WRITE, ll_i2obdconn(inode),
                                   ll_i2info(inode)->lli_smd, llwp->npgs,
                                   llwp->pga, set, NULL);
                if (rc == 0)
                        rc = ptlrpc_set_wait (set);
                ptlrpc_set_destroy (set);
        }

        /*
         * b=1038, we need to pass _brw errors up so that writeback
         * doesn't get stuck in recovery leaving processes stuck in
         * D waiting for pages
         */
        if (rc) {
                CERROR("error from obd_brw_async: rc = %d\n", rc);
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_FAIL, llwp->npgs);
        } else {
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_OK, (llwp->npgs));
        }

        for (i = 0 ; i < llwp->npgs ; i++) {
                struct page *page = llwp->pga[i].pg;

                CDEBUG(D_CACHE, "finished page %p at index %lu\n", page,
                       page->index);
                LASSERT(PageLocked(page));

                rc = ll_clear_dirty_pages(ll_i2obdconn(inode),
                                          ll_i2info(inode)->lli_smd,
                                          page->index, page->index);
                LASSERT(rc == 0);

                unlock_page(page);
                page_cache_release(page);
        }
}
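
/*
 * everything from here through ll_check_dirty() is only built on 2.4
 * kernels, where the VM gives us no dirty-page accounting of its own;
 * see the comment above should_writeback() below.
 */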
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))

#ifndef PG_inactive_clean
#ifdef CONFIG_DISCONTIGMEM
#error "sorry, we don't support DISCONTIGMEM yet"
#endif

/*
 * __alloc_pages marks a zone as needing balancing if an allocation is
 * performed when the zone has fewer free pages than its 'low' water
 * mark.  it's cleared when try_to_free_pages makes progress.
 */
static int zones_need_balancing(void)
{
        pg_data_t *pgdat;
        zone_t *zone;
        int i;

        for ( pgdat = pgdat_list ; pgdat != NULL ; pgdat = pgdat->node_next ) {
                for ( i = pgdat->nr_zones-1 ; i >= 0 ; i-- ) {
                        zone = &pgdat->node_zones[i];
                        if ( zone->need_balance )
                                return 1;
                }
        }
        return 0;
}
#endif /* ! PG_inactive_clean */

/* 2.4 doesn't give us a way to find out how many pages we have
 * cached 'cause we're not using buffer_heads.  we are very
 * conservative here and flush the superblock of all dirty data
 * when the vm (rmap or stock) thinks that it is running low
 * and kswapd would have done work.  kupdated isn't good enough
 * because writers (dbench) can dirty _very quickly_, and we
 * allocate under writepage..
 *
 * 2.5 gets this right, see the {inc,dec}_page_state(nr_dirty, )
 */
static int should_writeback(void)
{
#ifdef PG_inactive_clean
        if (free_high(ALL_ZONES) > 0 || free_low(ANY_ZONE) > 0)
#else
        if (zones_need_balancing())
#endif
                return 1;
        return 0;
}
static int ll_alloc_brw(struct inode *inode, struct ll_writeback_pages *llwp)
{
        memset(llwp, 0, sizeof(struct ll_writeback_pages));

        llwp->max = inode->i_blksize >> PAGE_CACHE_SHIFT;
        if (llwp->max == 0) {
                CERROR("forcing llwp->max to 1. blksize: %lu\n",
                       inode->i_blksize);
                llwp->max = 1;
        }
        llwp->pga = kmalloc(llwp->max * sizeof(*llwp->pga), GFP_ATOMIC);
        if (llwp->pga == NULL)
                return -ENOMEM;
        return 0;
}
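
/*
 * called when the VM wants memory back (should_writeback()): first walk
 * sb->s_dirty from the tail and push dirty pages out of each dirty inode
 * until the pressure eases; then, if that wasn't enough, wait on inodes
 * whose writeback is already in flight on s_locked_inodes.
 */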
int ll_check_dirty(struct super_block *sb)
{
        unsigned long old_flags; /* hack? */
        int making_progress;
        struct inode *inode;
        int rc = 0;

        if (!should_writeback())
                return 0;

        old_flags = current->flags;
        current->flags |= PF_MEMALLOC;

        spin_lock(&inode_lock);
        /*
         * first we try and write back dirty pages from dirty inodes
         * until the VM thinks we're ok again..
         */
        do {
                struct ll_writeback_pages llwp;
                struct list_head *pos;

                making_progress = 0;
                inode = NULL;
                list_for_each_prev(pos, &sb->s_dirty) {
                        inode = list_entry(pos, struct inode, i_list);
                        if (!(inode->i_state & I_DIRTY_PAGES)) {
                                inode = NULL;
                                continue;
                        }
                        break;
                }
                if (inode == NULL)
                        break;

                /* duplicate __sync_one, *sigh* */
                list_del(&inode->i_list);
                list_add(&inode->i_list, &inode->i_sb->s_locked_inodes);
                inode->i_state |= I_LOCK;
                inode->i_state &= ~I_DIRTY_PAGES;

                spin_unlock(&inode_lock);

                rc = ll_alloc_brw(inode, &llwp);
                if (rc != 0)
                        GOTO(cleanup, rc);

                do {
                        llwp.npgs = 0;
                        ll_get_dirty_pages(inode, &llwp);
                        if (llwp.npgs) {
                                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                                    LPROC_LL_WB_PRESSURE,
                                                    llwp.npgs);
                                ll_writeback(inode, &llwp);
                                making_progress = 1;
                        }
                } while (llwp.npgs && should_writeback());

                spin_lock(&inode_lock);
                if (!list_empty(&inode->i_mapping->dirty_pages))
                        inode->i_state |= I_DIRTY_PAGES;

                inode->i_state &= ~I_LOCK;
                /*
                 * we are sneaky and leave the inode on the dirty list,
                 * even though it might not still be..
                 */
                if (!(inode->i_state & I_FREEING)) {
                        list_del(&inode->i_list);
                        list_add(&inode->i_list, &inode->i_sb->s_dirty);
                }
                wake_up(&inode->i_wait);
                kfree(llwp.pga);
        } while (making_progress && should_writeback());

        /*
         * and if that didn't work, we sleep on any data that might
         * be under writeback..
         */
        while (should_writeback()) {
                if (list_empty(&sb->s_locked_inodes))
                        break;

                inode = list_entry(sb->s_locked_inodes.next, struct inode,
                                   i_list);
                atomic_inc(&inode->i_count); /* XXX hack? */
                spin_unlock(&inode_lock);
                wait_event(inode->i_wait, !(inode->i_state & I_LOCK));
                iput(inode);
                spin_lock(&inode_lock);
        }

        spin_unlock(&inode_lock);

cleanup:
        current->flags = old_flags;
        return rc;
}

#endif /* linux 2.5 */
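
/*
 * called from the writepage path with a single dirty page (the
 * LPROC_LL_WB_WRITEPAGE counter below tracks it); rather than sending the
 * page alone, top the brw up with other dirty pages from the same inode so
 * one RPC can carry up to llwp.max pages.
 */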
int ll_batch_writepage(struct inode *inode, struct page *page)
{
        unsigned long old_flags; /* hack? */
        struct ll_writeback_pages llwp;
        int rc = 0;

        old_flags = current->flags;
        current->flags |= PF_MEMALLOC;
        rc = ll_alloc_brw(inode, &llwp);
        if (rc != 0)
                GOTO(restore_flags, rc);

        if (llwp_consume_page(&llwp, inode, page) == 0)
                ll_get_dirty_pages(inode, &llwp);

        if (llwp.npgs) {
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_WRITEPAGE, llwp.npgs);
                ll_writeback(inode, &llwp);
        }
        kfree(llwp.pga);

restore_flags:
        current->flags = old_flags;
        return rc;
}