/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  this started as an implementation of an io daemon that woke regularly
 *  to force writeback.. the throttling in prepare_write and kupdate's usual
 *  writeback pressure got rid of our thread, but the file name remains.
 */

#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/kmod.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include "llite_internal.h"

/* PG_inactive_clean is shorthand for rmap, we want free_high/low here.. */
#ifdef PG_inactive_clean
#include <linux/mm_inline.h>
#endif

#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/lustre_lite.h>

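/* compat for kernels that lack it: like list_for_each_safe, but walks the
 * list tail-to-head; 'n' is sampled before the body runs, so the body is
 * free to remove 'pos' from the list it is walking */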
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev)
#endif

extern spinlock_t inode_lock;

struct ll_writeback_pages {
        obd_count npgs, max;
        struct brw_page *pga;
};

/*
 * check to see if we're racing with truncate and put the page in
 * the brw_page array.  returns 0 if there is more room and 1
 * if the array is full.
 */
static int llwp_consume_page(struct ll_writeback_pages *llwp,
                             struct inode *inode, struct page *page)
{
        obd_off off = ((obd_off)page->index) << PAGE_SHIFT;
        struct brw_page *pg;

        /* we raced with truncate? */
        if (off >= inode->i_size) {
                int rc;
                rc = ll_clear_dirty_pages(ll_i2obdconn(inode),
                                          ll_i2info(inode)->lli_smd,
                                          page->index, page->index);

                LASSERT(rc == 0);
                CDEBUG(D_CACHE, "offset "LPU64" (index %lu) > i_size %llu\n",
                       off, page->index, inode->i_size);
                unlock_page(page);
                return 0;
        }

        page_cache_get(page);
        pg = &llwp->pga[llwp->npgs];
        llwp->npgs++;
        LASSERT(llwp->npgs <= llwp->max);

        pg->pg = page;
        pg->off = off;
        pg->flag = OBD_BRW_CREATE|OBD_BRW_FROM_GRANT;
        pg->count = PAGE_CACHE_SIZE;

        /* catch partial writes for files that end mid-page */
        if (pg->off + pg->count > inode->i_size)
                pg->count = inode->i_size & ~PAGE_CACHE_MASK;
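        /*
         * e.g. with 4096-byte pages, i_size = 10000 and page->index = 2:
         * off = 8192 and 8192 + 4096 > 10000, so count becomes
         * 10000 & 4095 = 1808 -- just the bytes up to EOF
         */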

        /*
         * matches ptlrpc_bulk_get assert that trickles down
         * from a 0 page length going through niobuf and into
         * the buffer regions being posted
         */
        LASSERT(pg->count >= 0);

        CDEBUG(D_CACHE, "brw_page %p: off "LPU64" cnt %d, page %p: ind %ld"
                        " i_size: %llu\n", pg, pg->off, pg->count, page,
                        page->index, inode->i_size);

        return llwp->npgs == llwp->max;
}

/*
 * fills the llwp array with dirty pages from the mapping until it is full
 *
 * this duplicates filemap_fdatasync and gives us an opportunity to grab lots
 * of dirty pages..
 */
static void ll_get_dirty_pages(struct inode *inode,
                               struct ll_writeback_pages *llwp)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct list_head *pos, *n;
        ENTRY;

        PGCACHE_WRLOCK(mapping);

        list_for_each_prev_safe(pos, n, &mapping->dirty_pages) {
                page = list_entry(pos, struct page, list);

                if (TryLockPage(page))
                        continue;

                list_del(&page->list);
                list_add(&page->list, &mapping->locked_pages);

                if (!PageDirty(page)) {
                        unlock_page(page);
                        continue;
                }
                ClearPageDirty(page);

                if (llwp_consume_page(llwp, inode, page) != 0)
                        break;
        }

        PGCACHE_WRUNLOCK(mapping);
        EXIT;
}

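/*
 * issue a single async brw for the pages collected in llwp and wait for it
 * to complete, then mark the pages clean and drop the references that
 * llwp_consume_page took
 */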
static void ll_writeback(struct inode *inode, struct ll_writeback_pages *llwp)
{
        int rc, i;
        struct ptlrpc_request_set *set;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),bytes=%u\n",
               inode->i_ino, inode->i_generation, inode,
               ((llwp->npgs-1) << PAGE_SHIFT) + llwp->pga[llwp->npgs-1].count);

        set = ptlrpc_prep_set();
        if (set == NULL) {
                CERROR("Can't create request set\n");
                rc = -ENOMEM;
        } else {
                rc = obd_brw_async(OBD_BRW_WRITE, ll_i2obdconn(inode),
                                   ll_i2info(inode)->lli_smd, llwp->npgs,
                                   llwp->pga, set, NULL);
                if (rc == 0)
                        rc = ptlrpc_set_wait(set);
                ptlrpc_set_destroy(set);
        }
        /*
         * b=1038, we need to pass _brw errors up so that writeback
         * doesn't get stuck in recovery leaving processes stuck in
         * D waiting for pages
         */
        if (rc) {
                CERROR("error from obd_brw_async: rc = %d\n", rc);
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_FAIL, llwp->npgs);
        } else {
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_OK, llwp->npgs);
        }

        for (i = 0; i < llwp->npgs; i++) {
                struct page *page = llwp->pga[i].pg;

                CDEBUG(D_CACHE, "finished page %p at index %lu\n", page,
                       page->index);
                LASSERT(PageLocked(page));

                rc = ll_clear_dirty_pages(ll_i2obdconn(inode),
                                          ll_i2info(inode)->lli_smd,
                                          page->index, page->index);
                LASSERT(rc == 0);
                unlock_page(page);
                page_cache_release(page);
        }

        EXIT;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))

#ifndef PG_inactive_clean
#ifdef CONFIG_DISCONTIGMEM
#error "sorry, we don't support DISCONTIGMEM yet"
#endif

/*
 * __alloc_pages marks a zone as needing balancing if an allocation is
 * performed when the zone has fewer free pages than its 'low' water
 * mark.  it's cleared when try_to_free_pages makes progress.
 */
static int zones_need_balancing(void)
{
        pg_data_t *pgdat;
        zone_t *zone;
        int i;

        for (pgdat = pgdat_list; pgdat != NULL; pgdat = pgdat->node_next) {
                for (i = pgdat->nr_zones - 1; i >= 0; i--) {
                        zone = &pgdat->node_zones[i];

                        if (zone->need_balance)
                                return 1;
                }
        }
        return 0;
}
#endif
/* 2.4 doesn't give us a way to find out how many pages we have
 * cached 'cause we're not using buffer_heads.  we are very
 * conservative here and flush the superblock of all dirty data
 * when the vm (rmap or stock) thinks that it is running low
 * and kswapd would have done work.  kupdated isn't good enough
 * because writers (dbench) can dirty _very quickly_, and we
 * allocate under writepage..
 *
 * 2.5 gets this right, see the {inc,dec}_page_state(nr_dirty, )
 */
static int should_writeback(void)
{
#ifdef PG_inactive_clean
        if (free_high(ALL_ZONES) > 0 || free_low(ANY_ZONE) > 0)
#else
        if (zones_need_balancing())
#endif
                return 1;
        return 0;
}
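
#if 0
/*
 * a minimal sketch of what this check might look like on 2.5, where the VM
 * counts dirty pages for us via {inc,dec}_page_state(nr_dirty); the
 * get_page_state() call exists there, but the 40% threshold below is an
 * assumption for illustration, not 2.5's actual writeback policy
 */
static int should_writeback_25(void)
{
        struct page_state ps;

        get_page_state(&ps);
        /* hypothetical cutoff: flush once 40% of physical pages are dirty */
        return ps.nr_dirty > num_physpages * 40 / 100;
}
#endif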

static int ll_alloc_brw(struct inode *inode, struct ll_writeback_pages *llwp)
{
        memset(llwp, 0, sizeof(struct ll_writeback_pages));

        llwp->max = inode->i_blksize >> PAGE_CACHE_SHIFT;
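        /* e.g. with 4k pages, an i_blksize of 65536 gives a batch of 16
         * brw_pages per writeback pass */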
        if (llwp->max == 0) {
                CERROR("forcing llwp->max to 1.  blksize: %lu\n",
                       inode->i_blksize);
                llwp->max = 1;
        }
        llwp->pga = kmalloc(llwp->max * sizeof(*llwp->pga), GFP_ATOMIC);
        if (llwp->pga == NULL)
                RETURN(-ENOMEM);
        RETURN(0);
}

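/*
 * called when the 2.4 VM is under pressure: writes this superblock's dirty
 * pages back until the VM is happy again, then waits on any inodes that are
 * still locked under writeback.  returns the number of pages written, or a
 * negative errno if the brw_page array couldn't be allocated.
 */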
int ll_check_dirty(struct super_block *sb)
{
        unsigned long old_flags; /* hack? */
        int making_progress;
        struct inode *inode;
        int rc = 0;
        ENTRY;

        if (!should_writeback())
                return 0;

        old_flags = current->flags;
        current->flags |= PF_MEMALLOC;

        spin_lock(&inode_lock);

        /*
         * first we try and write back dirty pages from dirty inodes
         * until the VM thinks we're ok again..
         */
        do {
                struct ll_writeback_pages llwp;
                struct list_head *pos;
                inode = NULL;
                making_progress = 0;

                list_for_each_prev(pos, &sb->s_dirty) {
                        inode = list_entry(pos, struct inode, i_list);

                        if (!(inode->i_state & I_DIRTY_PAGES)) {
                                inode = NULL;
                                continue;
                        }
                        break;
                }

                if (inode == NULL)
                        break;

                /* duplicate __sync_one, *sigh* */
                list_del(&inode->i_list);
                list_add(&inode->i_list, &inode->i_sb->s_locked_inodes);
                inode->i_state |= I_LOCK;
                inode->i_state &= ~I_DIRTY_PAGES;

                spin_unlock(&inode_lock);

                rc = ll_alloc_brw(inode, &llwp);
                if (rc != 0) {
                        /* don't bail out with the inode still locked on
                         * s_locked_inodes; put it back the way the bottom
                         * of the loop would have */
                        spin_lock(&inode_lock);
                        inode->i_state |= I_DIRTY_PAGES;
                        inode->i_state &= ~I_LOCK;
                        if (!(inode->i_state & I_FREEING)) {
                                list_del(&inode->i_list);
                                list_add(&inode->i_list,
                                         &inode->i_sb->s_dirty);
                        }
                        spin_unlock(&inode_lock);
                        wake_up(&inode->i_wait);
                        GOTO(cleanup, rc);
                }

                do {
                        llwp.npgs = 0;
                        ll_get_dirty_pages(inode, &llwp);
                        if (llwp.npgs) {
                                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                                    LPROC_LL_WB_PRESSURE,
                                                    llwp.npgs);
                                ll_writeback(inode, &llwp);
                                rc += llwp.npgs;
                                making_progress = 1;
                        }
                } while (llwp.npgs && should_writeback());

                spin_lock(&inode_lock);

                if (!list_empty(&inode->i_mapping->dirty_pages))
                        inode->i_state |= I_DIRTY_PAGES;

                inode->i_state &= ~I_LOCK;
                /*
                 * we are sneaky and leave the inode on the dirty list,
                 * even though it might not still be dirty..
                 */
                if (!(inode->i_state & I_FREEING)) {
                        list_del(&inode->i_list);
                        list_add(&inode->i_list, &inode->i_sb->s_dirty);
                }
                wake_up(&inode->i_wait);
                kfree(llwp.pga);
        } while (making_progress && should_writeback());

        /*
         * and if that didn't work, we sleep on any data that might
         * be under writeback..
         */
        while (should_writeback()) {
                if (list_empty(&sb->s_locked_inodes))
                        break;

                inode = list_entry(sb->s_locked_inodes.next, struct inode,
                                   i_list);

                atomic_inc(&inode->i_count); /* XXX hack? */
                spin_unlock(&inode_lock);
                wait_event(inode->i_wait, !(inode->i_state & I_LOCK));
                iput(inode);
                spin_lock(&inode_lock);
        }

        spin_unlock(&inode_lock);

cleanup:
        current->flags = old_flags;

        RETURN(rc);
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) */

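/*
 * write the given dirty, locked page and opportunistically batch in the
 * inode's other dirty pages; presumably reached from the ->writepage path,
 * though that caller lives elsewhere in llite
 */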
int ll_batch_writepage(struct inode *inode, struct page *page)
{
        unsigned long old_flags; /* hack? */
        struct ll_writeback_pages llwp;
        int rc = 0;
        ENTRY;

        old_flags = current->flags;
        current->flags |= PF_MEMALLOC;
        rc = ll_alloc_brw(inode, &llwp);
        if (rc != 0)
                GOTO(restore_flags, rc);

        if (llwp_consume_page(&llwp, inode, page) == 0)
                ll_get_dirty_pages(inode, &llwp);

        if (llwp.npgs) {
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_WB_WRITEPAGE, llwp.npgs);
                ll_writeback(inode, &llwp);
        }
        kfree(llwp.pga);

restore_flags:
        current->flags = old_flags;
        RETURN(rc);
}