Whamcloud - gitweb
19c74370af39dd1b277c533e9847a0537eef4c34
[fs/lustre-release.git] / lustre / obdfs / flushd.c
1 /*
2  * OBDFS Super operations - also used for Lustre file system
3  *
4   *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  * Copyright (C) 1999 Stelias Computing Inc. <braam@stelias.com>
7  * Copyright (C) 1999 Seagate Technology Inc.
8  *
9  */
10 #include <linux/config.h>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/mm.h>
14 #include <linux/string.h>
15 #include <linux/stat.h>
16 #include <linux/errno.h>
17 #include <linux/locks.h>
18 #include <linux/unistd.h>
19
20 #include <asm/system.h>
21 #include <asm/uaccess.h>
22
23 #include <linux/fs.h>
24 #include <linux/stat.h>
25 #include <asm/uaccess.h>
26 #include <linux/vmalloc.h>
27 #include <asm/segment.h>
28 #include <linux/sched.h>
29
30 #include <linux/obd_support.h>
31 #include <linux/obd_class.h>
32 #include <linux/obdfs.h>
33
34
35
/*
 * Tunable parameters for the obdfs page-flush daemon ("pupdate"),
 * modelled on the kernel's bdflush/kupdate tunables.  Only interval
 * and age_buffer are referenced from this file; the rest mirror the
 * bdflush parameter block for familiarity.
 */
struct {
        int nfract;  /* Percentage of buffer cache dirty to 
                        activate bdflush */
        int ndirty;  /* Maximum number of dirty blocks to write out per
                        wake-cycle */
        int nrefill; /* Number of clean buffers to try to obtain
                                each time we call refill */
        int nref_dirt; /* Dirty buffer threshold for activating bdflush
                          when trying to refill buffers. */
        int interval; /* jiffies delay between kupdate flushes */
        int age_buffer;  /* Time for normal buffer to age before we flush it */
        int age_super;  /* Time for superblock to age before we flush it */
} pupd_prm = {40, 500, 64, 256, 5*HZ, 30*HZ, 5*HZ };
49
50
51 atatic void obdfs_flush_reqs(struct obdfs_super_info *sbi, int wait, 
52                              int check_time) 
53 {
54         struct list_head *wr;
55         struct pg_req *req;
56         
57         wr = &si.s_wr_head;
58         while ( (wr = wr->next) != &si.s_wr_head ) {
59                 req = list_entry(wr, struct pg_req, rq_list);
60
61                 if (!check_time || 
62                     req->rq_jiffies <= (jiffies - pup_rpm.age_buffer)) {
63                         /* write request out to disk */
64                         obdfs_write_page(req->inode, req->page);
65                 }
66
67         }
68
69 }
70
71
72 static void obdfs_flush_dirty_pages(int check_time)
73 {
74         struct list_head *sl;
75
76         sl = &obdfs_super_list;
77         while ( (sl = sl->next) != &obdfs_super_listhead ) {
78                 struct obdfs_super_entry *entry = 
79                         list_entry(sl, struct obdfs_super_entry, sl_chain);
80                 struct obdfs_sb_info *sbi = sl->sl_sbi;
81
82                 /* walk write requests here */
83                 obdfs_flush_reqs(sbi, 0);
84         }
85
86         /* again, but now we wait for completion */
87         sl = &obdfs_super_listhead;
88         while ( (sl = sl->next) != &obdfs_super_listhead ) {
89                 struct obdfs_super_list *entry = 
90                         list_entry(sl, struct obdfs_super_list, sl_chain);
91                 struct super_block *sb = sl->sl_sb;
92
93                 /* walk write requests here */
94                 si = &sb->u.generic;
95                 obdfs_flush_reqs(si, 1);
96         }
97 }
98
99 static struct task_struct *pupdatd;
100
/*
 * Kernel-thread body for the obdfs page-flush daemon ("pupdate"),
 * closely modelled on the 2.2-era kupdate daemon.
 *
 * Detaches from the spawner's session/pgrp, blocks every signal except
 * SIGCONT and SIGSTOP, then loops forever: sleep pupd_prm.interval
 * jiffies (or park in TASK_STOPPED when the interval is 0 or a SIGSTOP
 * arrives) and flush dirty pages on every obdfs mount.  Never returns;
 * the int return type only satisfies kernel_thread()'s prototype.
 */
static int pupdate(void) 
{
        struct task_struct * tsk = current;
        int interval;
        
        pupdated = current;
        /* detach from the session/process group of whoever spawned us */
        tsk->session = 1;
        tsk->pgrp = 1;
        strcpy(tsk->comm, "pupdate");

        /* sigstop and sigcont will stop and wakeup kupdate */
        spin_lock_irq(&tsk->sigmask_lock);
        sigfillset(&tsk->blocked);
        siginitsetinv(&current->blocked, sigmask(SIGCONT) | sigmask(SIGSTOP));
        recalc_sigpending(tsk);
        spin_unlock_irq(&tsk->sigmask_lock);

        for (;;) {
                /* update interval */
                interval = pupd_prm.interval;
                if (interval)
                {
                        tsk->state = TASK_INTERRUPTIBLE;
                        schedule_timeout(interval);
                }
                else
                {
                /* interval == 0, or a SIGSTOP was consumed below: park
                 * until SIGCONT wakes us.  NOTE(review): this label is
                 * jumped to from outside the else-block -- legal C, but
                 * easy to misread. */
                stop_pupdate:
                        tsk->state = TASK_STOPPED;
                        schedule(); /* wait for SIGCONT */
                }
                /* check for sigstop */
                if (signal_pending(tsk))
                {
                        int stopped = 0;
                        spin_lock_irq(&tsk->sigmask_lock);
                        if (sigismember(&tsk->signal, SIGSTOP))
                        {
                                /* consume the SIGSTOP ourselves so the
                                 * scheduler doesn't act on it */
                                sigdelset(&tsk->signal, SIGSTOP);
                                stopped = 1;
                        }
                        recalc_sigpending(tsk);
                        spin_unlock_irq(&tsk->sigmask_lock);
                        if (stopped)
                                goto stop_pupdate;
                }
                printk("pupdate() activated...\n");
                /* flush_inodes(); */
                obdfs_flush_dirty_pages(1);
        }
}
152
153
154 int flushd_init(void)
155 {
156         /*      kernel_thread(bdflush, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND); */
157         kernel_thread(pupdate, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
158         return 0;
159 }
160
161 int flushd_cleanup(void)
162 {
163         /* this should deliver a signal to */
164         
165
166         /* XXX Andreas, we will do this later, for now, you must kill
167            pupdated with a SIGSTOP from userland, before unloading obdfs.o
168         */
169         if (pupdated) {
170                 /* send updated a STOP signal */
171                 /* then let it run at least once, before continuing */
172                 1;
173         }
174
175 }