1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Lustre Lite I/O Page Cache
6 * Copyright (C) 2002 Cluster File Systems, Inc.
9 #include <linux/config.h>
10 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/stat.h>
14 #include <linux/iobuf.h>
15 #include <linux/errno.h>
16 #include <linux/smp_lock.h>
17 #include <linux/unistd.h>
18 #include <linux/version.h>
19 #include <asm/system.h>
20 #include <asm/uaccess.h>
23 #include <linux/stat.h>
24 #include <asm/uaccess.h>
25 #include <asm/segment.h>
27 #include <linux/pagemap.h>
28 #include <linux/smp_lock.h>
30 #define DEBUG_SUBSYSTEM S_LLITE
32 #include <linux/lustre_mds.h>
33 #include <linux/lustre_lite.h>
34 #include <linux/lustre_lib.h>
36 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/*
 * Move @page from its mapping's dirty list onto the clean list, and
 * clear I_DIRTY_PAGES on the owning inode once no dirty pages remain.
 * Caller context: 2.4-only compatibility helper (guarded above).
 */
39 * Remove page from dirty list
41 static void __set_page_clean(struct page *page)
43 struct address_space *mapping = page->mapping;
/* NOTE(review): listing is elided here -- the declaration of 'inode'
 * (used below) and any early-exit for a NULL mapping are not visible. */
49 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,9))
50 spin_lock(&pagecache_lock);
/* Re-link the page from mapping->dirty_pages to mapping->clean_pages. */
53 list_del(&page->list);
54 list_add(&page->list, &mapping->clean_pages);
56 inode = mapping->host;
57 if (list_empty(&mapping->dirty_pages)) {
58 CDEBUG(D_INODE, "inode clean\n");
59 inode->i_state &= ~I_DIRTY_PAGES;
/* NOTE(review): the lock is taken under a (> 2,4,9) guard but released
 * under a (> 2,4,10) guard -- on exactly 2.4.10 this would lock without
 * unlocking. Confirm against the unelided source whether this is real
 * or an artifact of the elided listing. */
61 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,10))
62 spin_unlock(&pagecache_lock);
/*
 * Public wrapper around __set_page_clean(): only performs the
 * dirty-to-clean list move when the page is actually marked dirty.
 * (Elided lines presumably clear the PG_dirty bit as well -- confirm.)
 */
67 inline void set_page_clean(struct page *page)
69 if (PageDirty(page)) {
71 __set_page_clean(page);
75 /* SYNCHRONOUS I/O to object storage for an inode */
/*
 * ll_brw() - issue a single-page synchronous block read/write against
 * the object storage backing @inode.
 * @cmd:    OBD_BRW_READ or OBD_BRW_WRITE (passed through to obd_brw()).
 * @page:   the page to transfer; its offset in the object is derived
 *          from page->index below.
 * @create: non-zero sets OBD_BRW_CREATE so the OST allocates the block.
 * Returns: err from obd_brw() -- the declaration of 'err', the brw_page
 *          'pg' setup for pg.pg/pg.count, and the wait/cleanup on 'cbd'
 *          are in elided lines and not visible here.
 */
76 static int ll_brw(int cmd, struct inode *inode, struct page *page, int create)
78 struct ll_inode_info *lli = ll_i2info(inode);
79 struct lov_stripe_md *lsm = lli->lli_smd;
80 struct io_cb_data *cbd = ll_init_cb();
85 CHECK_MOUNT_EPOCH(inode);
/* Byte offset of this page within the object. */
92 pg.off = ((obd_off)page->index) << PAGE_SHIFT;
93 pg.flag = create ? OBD_BRW_CREATE : 0;
95 err = obd_brw(cmd, ll_i2obdconn(inode),lsm, 1, &pg, ll_sync_io_cb, cbd);
100 /* returns the page unlocked, but with a reference */
/*
 * ll_readpage() - address_space readpage method: fill @page from object
 * storage, or zero-fill it when it lies entirely beyond i_size.
 * On success the page is marked up to date; the elided epilogue
 * (readpage_out label) presumably kunmaps and unlocks the page.
 */
101 static int ll_readpage(struct file *file, struct page *page)
103 struct inode *inode = page->mapping->host;
104 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
/* Sanity: the VFS hands us a locked page. */
108 if (!PageLocked(page))
/* Page is past EOF: no backing data to read, hand back zeroes. */
111 if (inode->i_size <= offset) {
112 memset(kmap(page), 0, PAGE_SIZE);
114 GOTO(readpage_out, rc);
/* Already up to date -- unexpected for readpage, hence the CERROR. */
117 if (Page_Uptodate(page)) {
118 CERROR("Explain this please?\n");
119 GOTO(readpage_out, rc);
/* Synchronous read of the whole page from the OST. */
122 rc = ll_brw(OBD_BRW_READ, inode, page, 0);
127 SetPageUptodate(page);
/*
 * ll_prepare_write() - address_space prepare_write method: make sure the
 * region [from, to) of @page can be written by the caller.  Reads in the
 * old contents only when needed; skips the read for full-page overwrites
 * and for pages entirely beyond EOF.
 * NOTE(review): 'addr' (used in the memset) is declared in an elided
 * line, presumably addr = kmap(page).
 */
133 static int ll_prepare_write(struct file *file, struct page *page, unsigned from,
136 struct inode *inode = page->mapping->host;
137 obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
/* Sanity: the VFS hands us a locked page. */
143 if (!PageLocked(page))
/* Already up to date: nothing to pre-read. */
146 if (Page_Uptodate(page))
147 GOTO(prepare_done, rc);
149 /* We're completely overwriting an existing page, so _don't_ set it up
150 * to date until commit_write */
151 if (from == 0 && to == PAGE_SIZE)
154 /* We are writing to a new page, no need to read old data */
155 if (inode->i_size <= offset) {
156 memset(addr, 0, PAGE_SIZE);
/* Partial write inside EOF: read the existing page contents first. */
160 rc = ll_brw(OBD_BRW_READ, inode, page, 0);
165 SetPageUptodate(page);
170 /* returns the page unlocked, but with a reference */
/*
 * ll_writepage() - address_space writepage method: synchronously push
 * @page to object storage (OBD_BRW_CREATE set so the OST allocates the
 * block) and mark it clean on success.  The declaration of 'err', the
 * success/failure branch structure, and the unlock are in elided lines.
 */
171 static int ll_writepage(struct page *page)
173 struct inode *inode = page->mapping->host;
/* Sanity: the VFS hands us a locked page. */
177 if (!PageLocked(page))
180 err = ll_brw(OBD_BRW_WRITE, inode, page, 1);
182 //SetPageUptodate(page);
183 set_page_clean(page);
185 CERROR("ll_brw failure %d\n", err);
192 /* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated
/*
 * ll_commit_write() - address_space commit_write method: the caller has
 * copied user data into [from, to) of @page; write it out synchronously
 * and extend i_size if the write grew the file.
 * NOTE(review): 'create', 'err', 'size', the brw_page 'pg' setup for
 * pg.pg/pg.count, and the cbd wait/cleanup are all in elided lines.
 */
194 static int ll_commit_write(struct file *file, struct page *page,
195 unsigned from, unsigned to)
198 struct inode *inode = page->mapping->host;
199 struct ll_inode_info *lli = ll_i2info(inode);
200 struct lov_stripe_md *md = lli->lli_smd;
204 struct io_cb_data *cbd = ll_init_cb();
207 CHECK_MOUNT_EPOCH(inode);
/* Byte offset of this page within the object. */
211 pg.off = (((obd_off)page->index) << PAGE_SHIFT);
212 pg.flag = create ? OBD_BRW_CREATE : 0;
/* The page now holds the caller's data, so it is fully valid. */
217 SetPageUptodate(page);
/* Sanity: the VFS hands us a locked page. */
219 if (!PageLocked(page))
222 CDEBUG(D_INODE, "commit_page writing (off "LPD64"), count "LPD64"\n",
225 err = obd_brw(OBD_BRW_WRITE, ll_i2obdconn(inode), md,
226 1, &pg, ll_sync_io_cb, cbd);
/* Grow i_size if this write extended the file; never shrink it here. */
229 size = pg.off + pg.count;
230 /* do NOT truncate when writing in the middle of a file */
231 if (size > inode->i_size)
232 inode->i_size = size;
235 } /* ll_commit_write */
/*
 * ll_truncate() - inode truncate: punch out all object data from the new
 * i_size to the end of file, under a PW size lock.  The size lock is
 * taken before the punch and dropped after; the 'err' declaration, the
 * no-stripe early return, and the lockhs cleanup are in elided lines.
 */
237 void ll_truncate(struct inode *inode)
239 struct obdo oa = {0};
240 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
241 struct lustre_handle *lockhs = NULL;
246 /* object not yet allocated */
247 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
/* Identify the object being punched. */
251 oa.o_id = lsm->lsm_object_id;
252 oa.o_mode = inode->i_mode;
253 oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
255 CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after "LPD64")\n",
256 oa.o_id, inode->i_size);
/* Serialize against concurrent size changes with a PW size lock. */
258 err = ll_size_lock(inode, lsm, inode->i_size, LCK_PW, &lockhs);
260 CERROR("ll_size_lock failed: %d\n", err);
261 /* FIXME: What to do here? It's too late to back out... */
265 /* truncate == punch from new size to absolute end of file */
266 err = obd_punch(ll_i2obdconn(inode), &oa, lsm, inode->i_size,
269 CERROR("obd_truncate fails (%d)\n", err);
/* Pull updated object attributes (e.g. times) back into the inode. */
271 obdo_to_inode(inode, &oa, oa.o_valid);
273 err = ll_size_unlock(inode, lsm, LCK_PW, lockhs);
275 CERROR("ll_size_unlock failed: %d\n", err);
/*
 * ll_direct_IO() - 2.4 direct-I/O method: transfer the already-mapped
 * pages in @iobuf directly to/from object storage, one brw_page per
 * page, bypassing the page cache.  Requires blocksize == PAGE_SIZE.
 * Returns the byte count on success.  NOTE(review): the 'i'/'rc'
 * declarations, the cbd allocation/ll_init_cb() call, the cbd-failure
 * path, and the OBD_FREE(cbd) on the normal exit path are in elided
 * lines -- confirm cbd is freed on all paths in the unelided source.
 */
281 static int ll_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf,
282 unsigned long blocknr, int blocksize)
284 obd_count bufs_per_obdo = iobuf->nr_pages;
285 struct ll_inode_info *lli = ll_i2info(inode);
286 struct lov_stripe_md *lsm = lli->lli_smd;
287 struct brw_page *pga;
289 struct io_cb_data *cbd;
291 CHECK_MOUNT_EPOCH(inode);
/* No striping metadata means no backing object to do I/O against. */
294 if (!lsm || !lsm->lsm_object_id)
297 if (blocksize != PAGE_SIZE) {
298 CERROR("direct_IO blocksize != PAGE_SIZE\n");
306 OBD_ALLOC(pga, sizeof(*pga) * bufs_per_obdo);
/* pga allocation failed: release cbd before bailing out. */
308 OBD_FREE(cbd, sizeof(*cbd));
312 /* NB: we can't use iobuf->maplist[i]->index for the offset
313 * instead of "blocknr" because ->index contains garbage.
/* Build one full-page brw_page descriptor per mapped user page. */
315 for (i = 0; i < bufs_per_obdo; i++, blocknr++) {
316 pga[i].pg = iobuf->maplist[i];
317 pga[i].count = PAGE_SIZE;
318 pga[i].off = (obd_off)blocknr << PAGE_SHIFT;
319 pga[i].flag = OBD_BRW_CREATE;
/* One synchronous bulk RPC covering the whole kiobuf. */
322 rc = obd_brw(rw == WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
323 ll_i2obdconn(inode), lsm, bufs_per_obdo, pga,
/* Success convention for direct I/O: return bytes transferred. */
326 rc = bufs_per_obdo * PAGE_SIZE;
328 OBD_FREE(pga, sizeof(*pga) * bufs_per_obdo);
/*
 * ll_flush_inode_pages() - write all of an inode's cached pages to
 * object storage in one bulk call.  NOTE(review): heavily elided --
 * 'iobuf' is used below but never declared/filled in the visible lines,
 * bufs_per_obdo is initialized to 0 and must be counted under
 * pagecache_lock in the elided region, and this obd_brw() call uses an
 * old-style (count/offset/flags arrays) argument list that does not
 * match the pga-style calls elsewhere in this file -- confirm which
 * obd_brw() prototype this tree actually exports.
 */
332 int ll_flush_inode_pages(struct inode * inode)
334 obd_count bufs_per_obdo = 0;
335 obd_size *count = NULL;
336 obd_off *offset = NULL;
337 obd_flag *flags = NULL;
/* Walk the mapping's page lists under pagecache_lock (body elided). */
342 spin_lock(&pagecache_lock);
344 spin_unlock(&pagecache_lock);
/* One parallel entry per page in each of the three arrays. */
347 OBD_ALLOC(count, sizeof(*count) * bufs_per_obdo);
348 OBD_ALLOC(offset, sizeof(*offset) * bufs_per_obdo);
349 OBD_ALLOC(flags, sizeof(*flags) * bufs_per_obdo);
350 if (!count || !offset || !flags)
351 GOTO(out, err=-ENOMEM);
354 for (i = 0 ; i < bufs_per_obdo ; i++) {
355 count[i] = PAGE_SIZE;
356 offset[i] = ((obd_off)(iobuf->maplist[i])->index) << PAGE_SHIFT;
357 flags[i] = OBD_BRW_CREATE;
360 err = obd_brw(OBD_BRW_WRITE, ll_i2obdconn(inode),
361 ll_i2info(inode)->lli_smd, bufs_per_obdo,
362 iobuf->maplist, count, offset, flags, NULL, NULL);
/* NOTE(review): hard-coded 4096 here vs PAGE_SIZE everywhere else in
 * this file -- wrong on architectures with PAGE_SIZE != 4096; should
 * presumably be bufs_per_obdo * PAGE_SIZE. */
364 err = bufs_per_obdo * 4096;
/* Common exit: free whichever arrays were allocated (OBD_FREE of NULL
 * handling depends on the elided 'out:' label structure). */
367 OBD_FREE(flags, sizeof(*flags) * bufs_per_obdo);
368 OBD_FREE(count, sizeof(*count) * bufs_per_obdo);
369 OBD_FREE(offset, sizeof(*offset) * bufs_per_obdo);
/*
 * Lustre Lite address-space operations table, wired into inodes by the
 * llite inode setup code.  direct_IO is only published on kernels newer
 * than 2.4.17; sync_page falls through to the generic block helper.
 * (The closing brace of this initializer is beyond the visible listing.)
 */
373 struct address_space_operations ll_aops = {
374 readpage: ll_readpage,
375 writepage: ll_writepage,
376 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,17))
377 direct_IO: ll_direct_IO,
379 sync_page: block_sync_page,
380 prepare_write: ll_prepare_write,
381 commit_write: ll_commit_write,