Whamcloud - gitweb
ONLY UPDATE IF YOU NEED THIS (i.e. Andreas probably will)
[fs/lustre-release.git] / lustre / llite / rw.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Lite I/O Page Cache
5  *
6  * Copyright (C) 2002 Cluster File Systems, Inc.
7  */
8
9 #include <linux/config.h>
10 #include <linux/kernel.h>
11 #include <linux/mm.h>
12 #include <linux/string.h>
13 #include <linux/stat.h>
14 #include <linux/iobuf.h>
15 #include <linux/errno.h>
16 #include <linux/smp_lock.h>
17 #include <linux/unistd.h>
18 #include <linux/version.h>
19 #include <asm/system.h>
20 #include <asm/uaccess.h>
21
22 #include <linux/fs.h>
23 #include <linux/stat.h>
24 #include <asm/uaccess.h>
25 #include <asm/segment.h>
26 #include <linux/mm.h>
27 #include <linux/pagemap.h>
28 #include <linux/smp_lock.h>
29
30 #define DEBUG_SUBSYSTEM S_LLITE
31
32 #include <linux/lustre_mds.h>
33 #include <linux/lustre_lite.h>
34 #include <linux/lustre_lib.h>
35
36 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
37
38 /*
39  * Remove page from dirty list
40  */
41 static void __set_page_clean(struct page *page)
42 {
43         struct address_space *mapping = page->mapping;
44         struct inode *inode;
45
46         if (!mapping)
47                 return;
48
49 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,9))
50         spin_lock(&pagecache_lock);
51 #endif
52
53         list_del(&page->list);
54         list_add(&page->list, &mapping->clean_pages);
55
56         inode = mapping->host;
57         if (list_empty(&mapping->dirty_pages)) {
58                 CDEBUG(D_INODE, "inode clean\n");
59                 inode->i_state &= ~I_DIRTY_PAGES;
60         }
61 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,10))
62         spin_unlock(&pagecache_lock);
63 #endif
64         EXIT;
65 }
66
/* Clear a page's dirty bit and, if it was set, move the page onto its
 * mapping's clean list via __set_page_clean(). */
inline void set_page_clean(struct page *page)
{
        if (!PageDirty(page))
                return;

        ClearPageDirty(page);
        __set_page_clean(page);
}
74
/* SYNCHRONOUS I/O to object storage for an inode.
 *
 * Issues a single-page bulk read or write (cmd is OBD_BRW_READ or
 * OBD_BRW_WRITE as used by the callers below) for the whole page at the
 * page's file offset.  Returns 0 on success or a negative errno.
 */
static int ll_brw(int cmd, struct inode *inode, struct page *page, int create)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct io_cb_data *cbd = ll_init_cb();
        struct brw_page pg;
        int err;
        ENTRY;

        CHECK_MOUNT_EPOCH(inode);

        if (!cbd)
                RETURN(-ENOMEM);

        pg.pg = page;
        pg.count = PAGE_SIZE;
        /* byte offset of this page within the object */
        pg.off = ((obd_off)page->index) << PAGE_SHIFT;
        pg.flag = create ? OBD_BRW_CREATE : 0;

        /* NOTE(review): cbd is never freed on this path; presumably
         * ll_sync_io_cb takes ownership and releases it -- confirm. */
        err = obd_brw(cmd, ll_i2obdconn(inode),lsm, 1, &pg, ll_sync_io_cb, cbd);

        RETURN(err);
} /* ll_brw */
99
/* Read one page of file data synchronously from object storage.
 * Returns the page unlocked, but with a reference. */
static int ll_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        int rc = 0;
        ENTRY;

        if (!PageLocked(page))
                LBUG();

        /* Page lies entirely past EOF: hand back zeroes, no I/O needed. */
        if (inode->i_size <= offset) {
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(readpage_out, rc);
        }

        /* NOTE(review): the VM is not expected to call readpage on an
         * already-uptodate page, hence the complaint. */
        if (Page_Uptodate(page)) {
                CERROR("Explain this please?\n");
                GOTO(readpage_out, rc);
        }

        rc = ll_brw(OBD_BRW_READ, inode, page, 0);
        EXIT;

 readpage_out:
        if (!rc)
                SetPageUptodate(page);
        unlock_page(page);
        /* NOTE(review): a failed ll_brw still returns 0 here; the error is
         * signalled only by the page remaining !uptodate.  Confirm callers
         * check the uptodate bit rather than this return value. */
        return 0;
} /* ll_readpage */
131
132
/* Prepare a page for writing bytes [from, to).
 *
 * The page is kmapped here and stays mapped on EVERY return path; the
 * matching kunmap() is done in ll_commit_write().  If the page is not
 * already uptodate and the write is partial, the old contents are read
 * in synchronously first.  Returns 0 on success or a negative errno.
 */
static int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                            unsigned to)
{
        struct inode *inode = page->mapping->host;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        int rc = 0;
        char *addr;
        ENTRY;

        addr = kmap(page);
        if (!PageLocked(page))
                LBUG();

        if (Page_Uptodate(page))
                GOTO(prepare_done, rc);

        /* We're completely overwriting an existing page, so _don't_ set it up
         * to date until commit_write */
        if (from == 0 && to == PAGE_SIZE)
                RETURN(0);

        /* We are writing to a new page, no need to read old data */
        if (inode->i_size <= offset) {
                memset(addr, 0, PAGE_SIZE);
                goto prepare_done;
        }

        /* Partial write to an existing page: read the old contents so the
         * untouched bytes survive the commit. */
        rc = ll_brw(OBD_BRW_READ, inode, page, 0);

        EXIT;
 prepare_done:
        if (!rc)
                SetPageUptodate(page);

        return rc;
}
169
170 /* returns the page unlocked, but with a reference */
171 static int ll_writepage(struct page *page)
172 {
173         struct inode *inode = page->mapping->host;
174         int err;
175         ENTRY;
176
177         if (!PageLocked(page))
178                 LBUG();
179
180         err = ll_brw(OBD_BRW_WRITE, inode, page, 1);
181         if ( !err ) {
182                 //SetPageUptodate(page);
183                 set_page_clean(page);
184         } else {
185                 CERROR("ll_brw failure %d\n", err);
186         }
187         unlock_page(page);
188         RETURN(err);
189 }
190
191
192 /* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated
193  * too */
194 static int ll_commit_write(struct file *file, struct page *page,
195                            unsigned from, unsigned to)
196 {
197         int create = 1;
198         struct inode *inode = page->mapping->host;
199         struct ll_inode_info *lli = ll_i2info(inode);
200         struct lov_stripe_md *md = lli->lli_smd;
201         struct brw_page pg;
202         int err;
203         loff_t size;
204         struct io_cb_data *cbd = ll_init_cb();
205         ENTRY;
206
207         CHECK_MOUNT_EPOCH(inode);
208
209         pg.pg = page;
210         pg.count = to;
211         pg.off = (((obd_off)page->index) << PAGE_SHIFT);
212         pg.flag = create ? OBD_BRW_CREATE : 0;
213
214         if (!cbd)
215                 RETURN(-ENOMEM);
216
217         SetPageUptodate(page);
218
219         if (!PageLocked(page))
220                 LBUG();
221
222         CDEBUG(D_INODE, "commit_page writing (off "LPD64"), count "LPD64"\n",
223                pg.off, pg.count);
224
225         err = obd_brw(OBD_BRW_WRITE, ll_i2obdconn(inode), md,
226                       1, &pg, ll_sync_io_cb, cbd);
227         kunmap(page);
228
229         size = pg.off + pg.count;
230         /* do NOT truncate when writing in the middle of a file */
231         if (size > inode->i_size)
232                 inode->i_size = size;
233
234         RETURN(err);
235 } /* ll_commit_write */
236
/* Truncate the data objects backing an inode to the (already-updated)
 * inode->i_size by punching everything from the new size to EOF, under
 * a PW size lock.  Attribute changes reported by the punch are copied
 * back into the inode. */
void ll_truncate(struct inode *inode)
{
        struct obdo oa = {0};
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
        struct lustre_handle *lockhs = NULL;
        int err;
        ENTRY;

        if (!lsm) {
                /* object not yet allocated */
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                return;
        }

        oa.o_id = lsm->lsm_object_id;
        oa.o_mode = inode->i_mode;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;

        CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after "LPD64")\n",
               oa.o_id, inode->i_size);

        /* Serialize against concurrent size changes before punching. */
        err = ll_size_lock(inode, lsm, inode->i_size, LCK_PW, &lockhs);
        if (err) {
                CERROR("ll_size_lock failed: %d\n", err);
                /* FIXME: What to do here?  It's too late to back out... */
                LBUG();
        }

        /* truncate == punch from new size to absolute end of file */
        err = obd_punch(ll_i2obdconn(inode), &oa, lsm, inode->i_size,
                        OBD_OBJECT_EOF);
        if (err)
                CERROR("obd_truncate fails (%d)\n", err);
        else
                /* pull updated object attributes (times etc.) back in */
                obdo_to_inode(inode, &oa, oa.o_valid);

        err = ll_size_unlock(inode, lsm, LCK_PW, lockhs);
        if (err)
                CERROR("ll_size_unlock failed: %d\n", err);

        EXIT;
        return;
} /* ll_truncate */
280
/* Direct (page-cache-bypassing) I/O for a kiobuf of pre-mapped user pages.
 * Each page is sent as one brw_page at consecutive page-aligned offsets
 * starting at "blocknr" pages.  Returns the number of bytes transferred
 * on success, or a negative errno. */
static int ll_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf,
                        unsigned long blocknr, int blocksize)
{
        obd_count bufs_per_obdo = iobuf->nr_pages;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct brw_page *pga;
        int i, rc = 0;
        struct io_cb_data *cbd;

        CHECK_MOUNT_EPOCH(inode);

        ENTRY;
        /* NOTE(review): -ENOMEM for a missing/unallocated stripe MD looks
         * like the wrong errno -- confirm intent. */
        if (!lsm || !lsm->lsm_object_id)
                RETURN(-ENOMEM);

        /* only whole-page transfers are supported */
        if (blocksize != PAGE_SIZE) {
                CERROR("direct_IO blocksize != PAGE_SIZE\n");
                RETURN(-EINVAL);
        }

        cbd = ll_init_cb();
        if (!cbd)
                RETURN(-ENOMEM);

        OBD_ALLOC(pga, sizeof(*pga) * bufs_per_obdo);
        if (!pga) {
                OBD_FREE(cbd, sizeof(*cbd));
                RETURN(-ENOMEM);
        }

        /* NB: we can't use iobuf->maplist[i]->index for the offset
         * instead of "blocknr" because ->index contains garbage.
         */
        for (i = 0; i < bufs_per_obdo; i++, blocknr++) {
                pga[i].pg = iobuf->maplist[i];
                pga[i].count = PAGE_SIZE;
                pga[i].off = (obd_off)blocknr << PAGE_SHIFT;
                pga[i].flag = OBD_BRW_CREATE;
        }

        /* NOTE(review): cbd is freed here only on the pga-allocation
         * failure path; presumably ll_sync_io_cb releases it after the
         * brw completes -- confirm. */
        rc = obd_brw(rw == WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
                     ll_i2obdconn(inode), lsm, bufs_per_obdo, pga,
                     ll_sync_io_cb, cbd);
        if (rc == 0)
                rc = bufs_per_obdo * PAGE_SIZE;

        OBD_FREE(pga, sizeof(*pga) * bufs_per_obdo);
        RETURN(rc);
}
331
/* Flush an inode's dirty pages to object storage.
 *
 * NOTE(review): this is an unfinished stub.  bufs_per_obdo is always 0,
 * the pagecache_lock is taken and immediately dropped without scanning
 * anything, the three allocations are zero-sized, and the actual brw is
 * compiled out under #if 0.  As written it effectively does nothing and
 * returns 0 (or -ENOMEM if the zero-size OBD_ALLOCs can fail -- depends
 * on the macro; confirm). */
int ll_flush_inode_pages(struct inode * inode)
{
        obd_count        bufs_per_obdo = 0;
        obd_size         *count = NULL;
        obd_off          *offset = NULL;
        obd_flag         *flags = NULL;
        int              err = 0;

        ENTRY;

        /* placeholder: the page-list walk that should happen under this
         * lock was never written */
        spin_lock(&pagecache_lock);

        spin_unlock(&pagecache_lock);


        OBD_ALLOC(count, sizeof(*count) * bufs_per_obdo);
        OBD_ALLOC(offset, sizeof(*offset) * bufs_per_obdo);
        OBD_ALLOC(flags, sizeof(*flags) * bufs_per_obdo);
        if (!count || !offset || !flags)
                GOTO(out, err=-ENOMEM);

#if 0
        for (i = 0 ; i < bufs_per_obdo ; i++) {
                count[i] = PAGE_SIZE;
                offset[i] = ((obd_off)(iobuf->maplist[i])->index) << PAGE_SHIFT;
                flags[i] = OBD_BRW_CREATE;
        }

        err = obd_brw(OBD_BRW_WRITE, ll_i2obdconn(inode),
                      ll_i2info(inode)->lli_smd, bufs_per_obdo,
                      iobuf->maplist, count, offset, flags, NULL, NULL);
        if (err == 0)
                err = bufs_per_obdo * 4096;
#endif
 out:
        OBD_FREE(flags, sizeof(*flags) * bufs_per_obdo);
        OBD_FREE(count, sizeof(*count) * bufs_per_obdo);
        OBD_FREE(offset, sizeof(*offset) * bufs_per_obdo);
        RETURN(err);
}
372
/* Lustre Lite address_space operations for 2.4 kernels.  Read/write paths
 * funnel through the handlers defined above; direct_IO is only wired in on
 * kernels newer than 2.4.17, and bmap is left NULL (unsupported). */
struct address_space_operations ll_aops = {
        readpage: ll_readpage,
        writepage: ll_writepage,
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,17))
        direct_IO: ll_direct_IO,
#endif
        sync_page: block_sync_page,
        prepare_write: ll_prepare_write,
        commit_write: ll_commit_write,
        bmap: NULL
};
384 #endif