/*
 * OBDFS Super operations - also used for Lustre file system
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999 Stelias Computing Inc. <braam@stelias.com>
 * Copyright (C) 1999 Seagate Technology Inc.
 */
#define __NO_VERSION__
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/locks.h>
#include <linux/swap.h>

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/obdfs.h>
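/* Tunables for the pupdate flush daemon, modelled on the kernel's bdflush
 * parameters.  Of these, pupdate below only consults interval (sleep between
 * flush passes), age_buffer (how old a dirty page must be before it is
 * written), ndirty (cap on pages written per pass) and nfract (used to
 * derive dirty_limit); the remaining fields appear unused in this file.
 */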
static struct {
        int nfract;     /* Percentage of buffer cache dirty to
                           activate bdflush */
        int ndirty;     /* Maximum number of dirty blocks to write out per
                           wake-cycle */
        int nrefill;    /* Number of clean buffers to try to obtain
                           each time we call refill */
        int nref_dirt;  /* Dirty buffer threshold for activating bdflush
                           when trying to refill buffers. */
        int interval;   /* jiffies delay between pupdate flushes */
        int age_buffer; /* Time for normal buffer to age before we flush it */
        int age_super;  /* Time for superblock to age before we flush it */
} pupd_prm = {40, 500, 64, 256, 5*HZ, 30*HZ, 5*HZ };
/* Called with the superblock list lock */
static int obdfs_enqueue_pages(struct inode *inode, struct obdo **obdo,
                               int nr_slots, struct page **pages, char **bufs,
                               obd_size *counts, obd_off *offsets,
                               obd_flag *flag, unsigned long check_time)
{
        struct list_head *page_list = obdfs_iplist(inode);
        struct list_head *tmp;
        int num = 0;

        tmp = page_list;
        /* Traverse list in reverse order, so we do FIFO, not LIFO order */
        while ( (tmp = tmp->prev) != page_list && num < nr_slots ) {
                struct obdfs_pgrq *req;
                struct page *page;

                req = list_entry(tmp, struct obdfs_pgrq, rq_plist);
                page = req->rq_page;

                if (req->rq_jiffies > check_time)
                        break;  /* pages are in chronological order */

                /* Only allocate the obdo if we will actually do I/O here */
                if ( !*obdo ) {
                        *obdo = obdo_fromid(IID(inode), inode->i_ino,
                                            OBD_MD_FLNOTOBD);
                        if ( IS_ERR(*obdo) ) {
                                int err = PTR_ERR(*obdo);
                                *obdo = NULL;
                                return err;
                        }

                        /* FIXME revisit fromid & from_inode */
                        obdfs_from_inode(*obdo, inode);
                        *flag = OBD_BRW_CREATE;
                }

                /* Remove request from list before write to avoid conflict.
                 * Note that obdfs_pgrq_del() also deletes the request.
                 */
                obdfs_pgrq_del(req);
                if ( !page ) {
                        CDEBUG(D_CACHE, "no page \n");
                        continue;
                }

                bufs[num] = (char *)page_address(page);
                pages[num] = page;
                counts[num] = PAGE_SIZE;
                offsets[num] = ((obd_off)page->index) << PAGE_SHIFT;
                CDEBUG(D_INFO, "ENQ inode %ld, page %p addr %p to vector\n",
                       inode->i_ino, page, (char *)page_address(page));
                num++;
        }

        if (!list_empty(page_list))
                CDEBUG(D_INFO, "inode %ld list not empty\n", inode->i_ino);
        CDEBUG(D_INFO, "added %d page(s) to vector\n", num);

        return num;
} /* obdfs_enqueue_pages */
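/* obdfs_enqueue_pages() fills parallel slots of the caller's pages/bufs/
 * counts/offsets arrays, one entry per dirty page, and returns how many
 * slots it used; obdfs_flush_reqs() below turns each such batch into a
 * single obdfs_do_vec_wr() call.
 */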
/* Dequeue cached pages for a dying inode without writing them to disk. */
void obdfs_dequeue_pages(struct inode *inode)
{
        struct list_head *tmp;

        obd_down(&obdfs_i2sbi(inode)->osi_list_mutex);
        tmp = obdfs_islist(inode);
        if ( list_empty(tmp) ) {
                CDEBUG(D_INFO, "no dirty pages for inode %ld\n", inode->i_ino);
                obd_up(&obdfs_i2sbi(inode)->osi_list_mutex);
                return;
        }

        /* take it out of the super list */
        list_del(tmp);
        INIT_LIST_HEAD(obdfs_islist(inode));

        tmp = obdfs_iplist(inode);
        while ( (tmp = tmp->prev) != obdfs_iplist(inode) ) {
                struct obdfs_pgrq *req;
                struct page *page;

                req = list_entry(tmp, struct obdfs_pgrq, rq_plist);
                page = req->rq_page;
                /* take it out of the list and free */
                obdfs_pgrq_del(req);
                /* now put the page away */
                put_page(page);
        }

        obd_up(&obdfs_i2sbi(inode)->osi_list_mutex);

        /* decrement inode reference for page cache */
}
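/* Dirty state is tracked in two levels: each obdfs_sb_info keeps a list of
 * inodes with dirty pages (osi_inodes, entered via obdfs_islist()), and each
 * such inode keeps a list of obdfs_pgrq page requests (via obdfs_iplist()).
 * The flush path below walks the outer list and drains each inode's inner
 * list into a write vector.
 */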
/* Remove writeback requests for the superblock */
int obdfs_flush_reqs(struct list_head *inode_list, unsigned long check_time)
{
        struct list_head *tmp;
        int total_io = 0;
        obd_count num_io = 0;
        obd_count num_obdos = 0;
        struct inode *inodes[MAX_IOVEC];        /* write data back to these */
        struct page *pages[MAX_IOVEC];          /* call put_page on these */
        struct obdo *obdos[MAX_IOVEC];
        char *bufs[MAX_IOVEC];
        obd_size counts[MAX_IOVEC];
        obd_off offsets[MAX_IOVEC];
        obd_flag flags[MAX_IOVEC];
        obd_count bufs_per_obdo[MAX_IOVEC];
        int err = 0;
        struct obdfs_sb_info *sbi;

        if (!inode_list) {
                CDEBUG(D_INODE, "no list\n");
                return 0;
        }

        sbi = list_entry(inode_list, struct obdfs_sb_info, osi_inodes);

        obd_down(&sbi->osi_list_mutex);
        if ( list_empty(inode_list) ) {
                CDEBUG(D_CACHE, "list empty: memory %ld\n", obd_memory);
                obd_up(&sbi->osi_list_mutex);
                return 0;
        }

        /* Add each inode's dirty pages to a write vector, and write it.
         * Traverse list in reverse order, so we do FIFO, not LIFO order
         */
        tmp = inode_list;
        while ( (tmp = tmp->prev) != inode_list && total_io < pupd_prm.ndirty) {
                struct obdfs_inode_info *ii;
                struct inode *inode;
                int res;

                ii = list_entry(tmp, struct obdfs_inode_info, oi_inodes);
                inode = list_entry(ii, struct inode, u);
                inodes[num_obdos] = inode;
                obdos[num_obdos] = NULL;
                CDEBUG(D_INFO, "checking inode %ld pages\n", inode->i_ino);

                /* Make sure we reference "inode" and not "inodes[num_obdos]",
                 * as num_obdos will change after the loop is run.
                 */
                if (!list_empty(obdfs_iplist(inode))) {
                        res = obdfs_enqueue_pages(inode, &obdos[num_obdos],
                                                  MAX_IOVEC - num_io,
                                                  &pages[num_io], &bufs[num_io],
                                                  &counts[num_io],
                                                  &offsets[num_io],
                                                  &flags[num_obdos],
                                                  check_time);
                        CDEBUG(D_INFO, "FLUSH inode %ld, pages flushed: %d\n",
                               inode->i_ino, res);
                        if ( res < 0 ) {
                                CDEBUG(D_INODE,
                                       "fatal: unable to enqueue inode %ld (err %d)\n",
                                       inode->i_ino, res);
                                /* XXX Move bad inode to end of list so we can
                                 * continue with flushing list.  This is a
                                 * temporary measure to avoid machine lockups.
                                 */
                                list_del(tmp);
                                list_add(tmp, inode_list);
                                err = res;
                                break;
                        } else if (res) {
                                num_io += res;
                                total_io += res;
                                bufs_per_obdo[num_obdos] = res;
                                num_obdos++;
                        }

                        if ( num_io == MAX_IOVEC ) {
                                obd_up(&sbi->osi_list_mutex);
                                err = obdfs_do_vec_wr(inodes, num_io, num_obdos,
                                                      obdos, bufs_per_obdo,
                                                      pages, bufs, counts,
                                                      offsets, flags);
                                if ( err ) {
                                        CDEBUG(D_INODE,
                                               "fatal: unable to do vec_wr (err %d)\n", err);
                                        return err;
                                }
                                num_io = 0;
                                num_obdos = 0;
                                obd_down(&sbi->osi_list_mutex);
                        }
                }
        }
        obd_up(&sbi->osi_list_mutex);

        /* flush any remaining I/Os */
        if ( num_io ) {
                err = obdfs_do_vec_wr(inodes, num_io, num_obdos, obdos,
                                      bufs_per_obdo, pages, bufs, counts,
                                      offsets, flags);
                if (err)
                        CDEBUG(D_INODE, "fatal: unable to do vec_wr (err %d)\n", err);
                num_io = 0;
                num_obdos = 0;
        }

        /* Remove inode from superblock dirty list when no more pages.
         * Make sure we don't point at the current inode with tmp
         * when we re-init the list on the inode, or we will loop.
         */
        obd_down(&sbi->osi_list_mutex);
        tmp = inode_list;
        while ( (tmp = tmp->prev) != inode_list ) {
                struct obdfs_inode_info *ii;
                struct inode *inode;

                ii = list_entry(tmp, struct obdfs_inode_info, oi_inodes);
                inode = list_entry(ii, struct inode, u);
                CDEBUG(D_INFO, "checking inode %ld empty\n", inode->i_ino);
                if (list_empty(obdfs_iplist(inode))) {
                        CDEBUG(D_INFO, "remove inode %ld from dirty list\n",
                               inode->i_ino);
                        tmp = tmp->next;
                        list_del(obdfs_islist(inode));
                        /* decrement inode reference for page cache */
                        INIT_LIST_HEAD(obdfs_islist(inode));
                }
        }
        obd_up(&sbi->osi_list_mutex);

        CDEBUG(D_INFO, "flushed %d pages in total\n", total_io);

        return err ? err : total_io;
} /* obdfs_flush_reqs */
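/* On success obdfs_flush_reqs() returns the number of pages it wrote for
 * this superblock; if enqueueing or the vectored write failed it returns
 * that error instead.
 */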
/* Walk all of the superblocks and write out blocks which are too old.
 * Return the maximum number of blocks written for a single filesystem.
 */
int obdfs_flush_dirty_pages(unsigned long check_time)
{
        struct list_head *sl;
        int max = 0;

        sl = &obdfs_super_list;
        while ( (sl = sl->prev) != &obdfs_super_list ) {
                struct obdfs_sb_info *sbi =
                        list_entry(sl, struct obdfs_sb_info, osi_list);
                int ret;

                /* walk write requests here, use the sb, check the time */
                ret = obdfs_flush_reqs(&sbi->osi_inodes, check_time);
                /* XXX handle error? What to do with it? */

                max = ret > max ? ret : max;
        }

        return max;
} /* obdfs_flush_dirty_pages */
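/* The count returned here feeds back into pupdate below: the daemon compares
 * "wrote" against pupd_prm.ndirty and the cache size against dirty_limit
 * when adjusting its flush interval and age threshold for the next pass.
 */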
static struct task_struct *pupdated;

static int pupdate(void *unused)
{
        int interval = pupd_prm.interval;
        long age = pupd_prm.age_buffer;
        int wrote = 0;
        long dirty_limit;

        pupdated = current;
        pupdated->session = 1;
        pupdated->pgrp = 1;
        strcpy(pupdated->comm, "pupdated");

        printk("pupdated activated...\n");

        spin_lock_irq(&pupdated->sigmask_lock);
        sigfillset(&pupdated->blocked);
        siginitsetinv(&pupdated->blocked, sigmask(SIGTERM));
        recalc_sigpending(pupdated);
        spin_unlock_irq(&pupdated->sigmask_lock);
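        /* Everything except SIGTERM is blocked; obdfs_flushd_cleanup() uses
         * SIGTERM (the one signal left open) to ask the daemon to exit.
         */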
        for (;;) {
                /* update interval */
                if (interval) {
                        set_task_state(pupdated, TASK_INTERRUPTIBLE);
                        schedule_timeout(interval);
                }
                if (signal_pending(pupdated)) {
                        int stopped = 0;
                        spin_lock_irq(&pupdated->sigmask_lock);
                        if (sigismember(&pupdated->signal, SIGTERM)) {
                                sigdelset(&pupdated->signal, SIGTERM);
                                stopped = 1;
                        }
                        recalc_sigpending(pupdated);
                        spin_unlock_irq(&pupdated->sigmask_lock);
                        if (stopped) {
                                printk("pupdated stopped...\n");
                                set_task_state(pupdated, TASK_STOPPED);
                                pupdated = NULL;
                                return 0;
                        }
                }
                /* asynchronous setattr etc for the future ...
                obdfs_flush_dirty_inodes(jiffies - pupd_prm.age_super);
                 */
                dirty_limit = nr_free_buffer_pages() * pupd_prm.nfract / 100;
                dirty_limit = 16384 * pupd_prm.nfract / 100;
                CDEBUG(D_CACHE, "dirty_limit %ld, cache_count %ld, wrote %d\n",
                       dirty_limit, obdfs_cache_count, wrote);

                if (obdfs_cache_count > dirty_limit) {
                        interval = 0;
                        if ( wrote < pupd_prm.ndirty )
                                age >>= 1;
                        CDEBUG(D_CACHE, "age %ld, interval %d\n",
                               age, interval);
                } else {
                        if ( wrote < pupd_prm.ndirty >> 1 &&
                             obdfs_cache_count < dirty_limit / 2) {
                                interval = pupd_prm.interval;
                                age = pupd_prm.age_buffer;
                        } else if (obdfs_cache_count > dirty_limit / 2) {
                                interval >>= 1;
                                if ( wrote < pupd_prm.ndirty )
                                        age >>= 1;
                                CDEBUG(D_CACHE, "age %ld, interval %d\n",
                                       age, interval);
                        }
                }

                wrote = obdfs_flush_dirty_pages(jiffies - age);
        }
}
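/* Module init: start the pupdate daemon, which periodically writes aged
 * dirty pages back through obdfs_flush_dirty_pages().
 */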
int obdfs_flushd_init(void)
{
        /* kernel_thread(bdflush, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND); */
        kernel_thread(pupdate, NULL, 0);
        CDEBUG(D_PSDEV, __FUNCTION__ ": flushd inited\n");
        return 0;
}
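/* Module cleanup: ask pupdated to exit by sending SIGTERM (the only signal
 * it leaves unblocked), then wait for the daemon to stop.
 */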
int obdfs_flushd_cleanup(void)
{
        if (pupdated) /* for debugging purposes only */
                CDEBUG(D_CACHE, "pupdated->state = %lx\n", pupdated->state);

        /* deliver a signal to pupdated to shut it down */
        if (pupdated && (pupdated->state == TASK_RUNNING ||
                         pupdated->state == TASK_INTERRUPTIBLE )) {
                unsigned long timeout = HZ/20;
                unsigned long count = 0;

                send_sig_info(SIGTERM, (struct siginfo *)1, pupdated);
                while (pupdated) {
                        if ((count % (2*HZ)) == timeout)
                                printk(KERN_INFO "wait for pupdated to stop\n");
                        count += timeout;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(timeout);