/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc
 *
 * this started as an implementation of an io daemon that woke regularly
 * to force writeback.. the throttling in prepare_write and kupdate's usual
 * writeback pressure got rid of our thread, but the file name remains.
 */
#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/kmod.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/time.h>
/* PG_inactive_clean is shorthand for rmap, we want free_high/low here.. */
#ifdef PG_inactive_clean
#include <linux/mm_inline.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/lustre_lite.h>
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif

extern spinlock_t inode_lock;
struct ll_writeback_pages {
        obd_count npgs, max;
        struct brw_page *pga;
};

/*
 * check to see if we're racing with truncate and put the page in
 * the brw_page array.  returns 0 if there is more room and 1
 * if the array is full.
 */
static int llwp_consume_page(struct ll_writeback_pages *llwp,
                             struct inode *inode, struct page *page)
{
        obd_off off = ((obd_off)page->index) << PAGE_SHIFT;
        struct brw_page *pg;

        /* we raced with truncate? */
        if (off >= inode->i_size) {
                ll_remove_dirty(inode, page->index, page->index);
                unlock_page(page);
                return 0;
        }

        page_cache_get(page);
        pg = &llwp->pga[llwp->npgs];
        llwp->npgs++;
        LASSERT(llwp->npgs <= llwp->max);

        pg->pg = page;
        pg->off = off;
        pg->flag = OBD_BRW_CREATE;
        pg->count = PAGE_CACHE_SIZE;

        /* catch partial writes for files that end mid-page */
        if (pg->off + pg->count > inode->i_size)
                pg->count = inode->i_size & ~PAGE_CACHE_MASK;
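        /*
         * worked example: with 4096-byte pages and i_size = 5000, the page
         * at index 1 starts at off 4096, so pg->count is trimmed from 4096
         * to 5000 & 4095 = 904, the number of bytes that are actually valid.
         */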
        /*
         * matches ptlrpc_bulk_get assert that trickles down
         * from a 0 page length going through niobuf and into
         * the buffer regions being posted
         */
        LASSERT(pg->count >= 0);

        CDEBUG(D_CACHE, "brw_page %p: off "LPU64" cnt %d, page %p: ind %ld"
               " i_size: %llu\n", pg, pg->off, pg->count, page,
               page->index, inode->i_size);

        return llwp->npgs == llwp->max;
}
/*
 * returns the number of pages that it added to the pgs array
 *
 * this duplicates filemap_fdatasync and gives us an opportunity to grab lots
 * of dirty pages at once..
 */
static void ll_get_dirty_pages(struct inode *inode,
                               struct ll_writeback_pages *llwp)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct list_head *pos, *n;

        PGCACHE_WRLOCK(mapping);

        list_for_each_prev_safe(pos, n, &mapping->dirty_pages) {
                page = list_entry(pos, struct page, list);

                if (TryLockPage(page))
                        continue;

                list_del(&page->list);
                list_add(&page->list, &mapping->locked_pages);

                if (!PageDirty(page)) {
                        unlock_page(page);
                        continue;
                }
                ClearPageDirty(page);

                if (llwp_consume_page(llwp, inode, page) != 0)
                        break;
        }

        PGCACHE_WRUNLOCK(mapping);
}
static void ll_writeback(struct inode *inode, struct ll_writeback_pages *llwp)
{
        struct ptlrpc_request_set *set;
        int rc, i;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),bytes=%u\n",
               inode->i_ino, inode->i_generation, inode,
               ((llwp->npgs-1) << PAGE_SHIFT) + llwp->pga[llwp->npgs-1].count);

        set = ptlrpc_prep_set();
        if (set == NULL) {
                CERROR("Can't create request set\n");
                rc = -ENOMEM;
        } else {
                rc = obd_brw_async(OBD_BRW_WRITE, ll_i2obdconn(inode),
                                   ll_i2info(inode)->lli_smd, llwp->npgs,
                                   llwp->pga, set, NULL);
                if (rc == 0)
                        rc = ptlrpc_set_wait(set);
                ptlrpc_set_destroy(set);
        }
        /*
         * b=1038, we need to pass _brw errors up so that writeback
         * doesn't get stuck in recovery leaving processes stuck in
         * D waiting for pages
         */
        if (rc) {
                CERROR("error from obd_brw_async: rc = %d\n", rc);
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_FAIL, llwp->npgs);
        } else {
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_OK, llwp->npgs);
        }

        for (i = 0; i < llwp->npgs; i++) {
                struct page *page = llwp->pga[i].pg;

                CDEBUG(D_CACHE, "finished page %p at index %lu\n", page,
                       page->index);
                LASSERT(PageLocked(page));
                ll_remove_dirty(inode, page->index, page->index);
                unlock_page(page);
                page_cache_release(page);
        }
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))

#ifndef PG_inactive_clean
#ifdef CONFIG_DISCONTIGMEM
#error "sorry, we don't support DISCONTIGMEM yet"
#endif

/*
 * __alloc_pages marks a zone as needing balancing if an allocation is
 * performed when the zone has fewer free pages than its 'low' water
 * mark.  it's cleared when try_to_free_pages makes progress.
 */
static int zones_need_balancing(void)
{
        pg_data_t *pgdat;
        zone_t *zone;
        int i;

        for (pgdat = pgdat_list; pgdat != NULL; pgdat = pgdat->node_next) {
                for (i = pgdat->nr_zones - 1; i >= 0; i--) {
                        zone = &pgdat->node_zones[i];

                        if (zone->need_balance)
                                return 1;
                }
        }
        return 0;
}
#endif /* !PG_inactive_clean */
/* 2.4 doesn't give us a way to find out how many pages we have
 * cached 'cause we're not using buffer_heads.  we are very
 * conservative here and flush the superblock of all dirty data
 * when the vm (rmap or stock) thinks that it is running low
 * and kswapd would have done work.  kupdated isn't good enough
 * because writers (dbench) can dirty _very quickly_, and we
 * allocate under writepage..
 *
 * 2.5 gets this right, see the {inc,dec}_page_state(nr_dirty, )
 */
static int should_writeback(void)
{
#ifdef PG_inactive_clean
        if (free_high(ALL_ZONES) > 0 || free_low(ANY_ZONE) > 0)
#else
        if (zones_need_balancing())
#endif
                return 1;
        return 0;
}
static int ll_alloc_brw(struct inode *inode, struct ll_writeback_pages *llwp)
{
        memset(llwp, 0, sizeof(struct ll_writeback_pages));

        llwp->max = inode->i_blksize >> PAGE_CACHE_SHIFT;
        if (llwp->max == 0) {
                CERROR("forcing llwp->max to 1.  blksize: %lu\n",
                       inode->i_blksize);
                llwp->max = 1;
        }
        llwp->pga = kmalloc(llwp->max * sizeof(*llwp->pga), GFP_ATOMIC);
        if (llwp->pga == NULL)
                return -ENOMEM;
        return 0;
}
int ll_check_dirty(struct super_block *sb)
{
        unsigned long old_flags; /* hack? */
        int making_progress;
        struct inode *inode;
        int rc = 0;

        if (!should_writeback())
                return 0;

        old_flags = current->flags;
        current->flags |= PF_MEMALLOC;

        spin_lock(&inode_lock);

        /*
         * first we try and write back dirty pages from dirty inodes
         * until the VM thinks we're ok again..
         */
        do {
                struct ll_writeback_pages llwp;
                struct list_head *pos;

                inode = NULL;
                making_progress = 0;

                list_for_each_prev(pos, &sb->s_dirty) {
                        inode = list_entry(pos, struct inode, i_list);

                        if (!(inode->i_state & I_DIRTY_PAGES)) {
                                inode = NULL;
                                continue;
                        }
                        break;
                }

                if (inode == NULL)
                        break;

                /* duplicate __sync_one, *sigh* */
                list_del(&inode->i_list);
                list_add(&inode->i_list, &inode->i_sb->s_locked_inodes);
                inode->i_state |= I_LOCK;
                inode->i_state &= ~I_DIRTY_PAGES;

                spin_unlock(&inode_lock);

                do {
                        rc = ll_alloc_brw(inode, &llwp);
                        if (rc != 0)
                                break;

                        ll_get_dirty_pages(inode, &llwp);
                        if (llwp.npgs) {
                                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                                    LPROC_LL_WB_PRESSURE,
                                                    llwp.npgs);
                                ll_writeback(inode, &llwp);
                                making_progress = 1;
                        }
                        kfree(llwp.pga);
                } while (llwp.npgs && should_writeback());

                spin_lock(&inode_lock);

                if (!list_empty(&inode->i_mapping->dirty_pages))
                        inode->i_state |= I_DIRTY_PAGES;

                inode->i_state &= ~I_LOCK;
                /*
                 * we are sneaky and leave the inode on the dirty list,
                 * even though it might not still be..
                 */
                if (!(inode->i_state & I_FREEING)) {
                        list_del(&inode->i_list);
                        list_add(&inode->i_list, &inode->i_sb->s_dirty);
                }
                wake_up(&inode->i_wait);
        } while (making_progress && should_writeback());

        /*
         * and if that didn't work, we sleep on any data that might
         * be under writeback..
         */
        while (should_writeback()) {
                if (list_empty(&sb->s_locked_inodes))
                        break;

                inode = list_entry(sb->s_locked_inodes.next, struct inode,
                                   i_list);

                atomic_inc(&inode->i_count); /* XXX hack? */
                spin_unlock(&inode_lock);
                wait_event(inode->i_wait, !(inode->i_state & I_LOCK));
                iput(inode);
                spin_lock(&inode_lock);
        }

        spin_unlock(&inode_lock);

        current->flags = old_flags;
        return rc;
}
#endif /* linux 2.5 */
int ll_batch_writepage(struct inode *inode, struct page *page)
{
        unsigned long old_flags; /* hack? */
        struct ll_writeback_pages llwp;
        int rc = 0;

        old_flags = current->flags;
        current->flags |= PF_MEMALLOC;
        rc = ll_alloc_brw(inode, &llwp);
        if (rc != 0)
                goto restore_flags;

        if (llwp_consume_page(&llwp, inode, page) == 0)
                ll_get_dirty_pages(inode, &llwp);

        if (llwp.npgs) {
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_WRITEPAGE, llwp.npgs);
                ll_writeback(inode, &llwp);
        }
        kfree(llwp.pga);

restore_flags:
        current->flags = old_flags;
        return rc;
}
/*
 * we aggressively track offsets of pages that have been dirtied.  we need this
 * to make file size decisions around lock acquisition and cancellation.  all
 * extents include the offsets at their endpoints.
 */
struct offset_extent {
        rb_node_t       oe_node;
        unsigned long   oe_start, oe_end;
};
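/*
 * Illustrative sketch of how the helpers below behave for a hypothetical
 * sequence of page offsets (example usage, not code taken from any caller):
 * recording offsets 5, 6 and 7 glues them into the single extent [5,7], and
 * clearing offset 6 then punches a hole, leaving [5,5] and [7,7]:
 *
 *      ll_record_dirty(inode, 5);      // tree holds [5,5]
 *      ll_record_dirty(inode, 6);      // extended to [5,6]
 *      ll_record_dirty(inode, 7);      // extended to [5,7]
 *      ll_remove_dirty(inode, 6, 6);   // split into [5,5] and [7,7]
 */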
static struct offset_extent *ll_find_oe(rb_root_t *root,
                                        struct offset_extent *needle)
{
        struct rb_node_s *node = root->rb_node;
        struct offset_extent *oe;

        CDEBUG(D_INODE, "searching [%lu -> %lu]\n", needle->oe_start,
               needle->oe_end);

        while (node) {
                oe = rb_entry(node, struct offset_extent, oe_node);
                if (needle->oe_end < oe->oe_start)
                        node = node->rb_left;
                else if (needle->oe_start > oe->oe_end)
                        node = node->rb_right;
                else {
                        CDEBUG(D_INODE, "returning [%lu -> %lu]\n",
                               oe->oe_start, oe->oe_end);
                        return oe;
                }
        }
        return NULL;
}
/* do the rbtree mechanics to insert a node, callers are responsible
 * for making sure that this new node doesn't overlap with existing
 * nodes */
static void ll_insert_oe(rb_root_t *root, struct offset_extent *new_oe)
{
        rb_node_t **p = &root->rb_node;
        rb_node_t *parent = NULL;
        struct offset_extent *oe;

        LASSERT(new_oe->oe_start <= new_oe->oe_end);

        while (*p) {
                parent = *p;
                oe = rb_entry(parent, struct offset_extent, oe_node);
                if (new_oe->oe_end < oe->oe_start)
                        p = &(*p)->rb_left;
                else if (new_oe->oe_start > oe->oe_end)
                        p = &(*p)->rb_right;
                else
                        LBUG();
        }
        rb_link_node(&new_oe->oe_node, parent, p);
        rb_insert_color(&new_oe->oe_node, root);
}
static inline void lldo_dirty_add(struct inode *inode,
                                  struct ll_dirty_offsets *lldo,
                                  long val)
{
        lldo->do_num_dirty += val;
        lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_DIRTY_PAGES,
                            val);
}
void ll_record_dirty(struct inode *inode, unsigned long offset)
{
        struct ll_dirty_offsets *lldo = &ll_i2info(inode)->lli_dirty;
        struct offset_extent needle, *oe, *new_oe;

        /* will allocate more intelligently later */
        OBD_ALLOC(new_oe, sizeof(*new_oe));
        LASSERT(new_oe); /* will have to do for now :/ */

        spin_lock(&lldo->do_lock);

        /* find neighbours that we might glom on to */
        needle.oe_start = (offset > 0) ? offset - 1 : offset;
        needle.oe_end = (offset < ~0) ? offset + 1 : offset;
        oe = ll_find_oe(&lldo->do_root, &needle);
        if (oe == NULL) {
                new_oe->oe_start = offset;
                new_oe->oe_end = offset;
                ll_insert_oe(&lldo->do_root, new_oe);
                lldo_dirty_add(inode, lldo, 1);
                new_oe = NULL;
                goto out;
        }

        /* already recorded */
        if (offset >= oe->oe_start && offset <= oe->oe_end)
                goto out;

        /* ok, need to check for adjacent neighbours */
        needle.oe_start = offset;
        needle.oe_end = offset;
        if (ll_find_oe(&lldo->do_root, &needle))
                goto out;

        /* ok, it's safe to extend the oe we found */
        if (offset == oe->oe_start - 1)
                oe->oe_start--;
        else if (offset == oe->oe_end + 1)
                oe->oe_end++;
        else
                LBUG();
        lldo_dirty_add(inode, lldo, 1);

out:
        CDEBUG(D_INODE, "%lu now dirty\n", lldo->do_num_dirty);
        spin_unlock(&lldo->do_lock);
        if (new_oe)
                OBD_FREE(new_oe, sizeof(*new_oe));
}
void ll_remove_dirty(struct inode *inode, unsigned long start,
                     unsigned long end)
{
        struct ll_dirty_offsets *lldo = &ll_i2info(inode)->lli_dirty;
        struct offset_extent needle, *oe, *new_oe;

        /* will allocate more intelligently later */
        OBD_ALLOC(new_oe, sizeof(*new_oe));
        LASSERT(new_oe); /* will have to do for now :/ */

        needle.oe_start = start;
        needle.oe_end = end;

        spin_lock(&lldo->do_lock);
        for ( ; (oe = ll_find_oe(&lldo->do_root, &needle)) ; ) {

                /* see if we're punching a hole and need to create a node */
                if (oe->oe_start < start && oe->oe_end > end) {
                        new_oe->oe_start = end + 1;
                        new_oe->oe_end = oe->oe_end;
                        oe->oe_end = start - 1;
                        ll_insert_oe(&lldo->do_root, new_oe);
                        new_oe = NULL;
                        lldo_dirty_add(inode, lldo, -(end - start + 1));
                        break;
                }

                /* overlapping edges */
                if (oe->oe_start < start && oe->oe_end <= end) {
                        lldo_dirty_add(inode, lldo, -(oe->oe_end - start + 1));
                        oe->oe_end = start - 1;
                        continue;
                }
                if (oe->oe_end > end && oe->oe_start >= start) {
                        lldo_dirty_add(inode, lldo, -(end - oe->oe_start + 1));
                        oe->oe_start = end + 1;
                        continue;
                }

                /* an extent entirely within the one we're clearing */
                rb_erase(&oe->oe_node, &lldo->do_root);
                lldo_dirty_add(inode, lldo, -(oe->oe_end - oe->oe_start + 1));
                spin_unlock(&lldo->do_lock);
                OBD_FREE(oe, sizeof(*oe));
                spin_lock(&lldo->do_lock);
        }
        CDEBUG(D_INODE, "%lu now dirty\n", lldo->do_num_dirty);
        spin_unlock(&lldo->do_lock);
        if (new_oe)
                OBD_FREE(new_oe, sizeof(*new_oe));
}
int ll_find_dirty(struct ll_dirty_offsets *lldo, unsigned long *start,
                  unsigned long *end)
{
        struct offset_extent needle, *oe;
        int rc = -ENOENT;

        needle.oe_start = *start;
        needle.oe_end = *end;

        spin_lock(&lldo->do_lock);
        oe = ll_find_oe(&lldo->do_root, &needle);
        if (oe) {
                *start = oe->oe_start;
                *end = oe->oe_end;
                rc = 0;
        }
        spin_unlock(&lldo->do_lock);

        return rc;
}
int ll_farthest_dirty(struct ll_dirty_offsets *lldo, unsigned long *farthest)
{
        struct rb_node_s *last, *node;
        struct offset_extent *oe;
        int rc = -ENOENT;

        spin_lock(&lldo->do_lock);
        for (node = lldo->do_root.rb_node, last = NULL;
             node != NULL;
             last = node, node = node->rb_right)
                ;

        if (last) {
                oe = rb_entry(last, struct offset_extent, oe_node);
                *farthest = oe->oe_end;
                rc = 0;
        }
        spin_unlock(&lldo->do_lock);
        return rc;
}
void ll_lldo_init(struct ll_dirty_offsets *lldo)
{
        spin_lock_init(&lldo->do_lock);
        lldo->do_num_dirty = 0;
        lldo->do_root.rb_node = NULL;
}