lustre/llite/rw.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <asm/segment.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

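/* fallback for kernels that don't define this helper: walk a list
 * backwards while it's safe to unlink the current entry */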
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev )
#endif

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct brw_page pg;
        int rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << PAGE_SHIFT;

        if (cmd == OBD_BRW_WRITE &&
            (pg.off + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
        else
                pg.count = PAGE_SIZE;

        CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
               cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
               pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, inode->i_size,
                       page->mapping->host, page->mapping->host->i_size,
                       page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd == OBD_BRW_WRITE)
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_WRITE, pg.count);
        else
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_READ, pg.count);
        rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}

/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate
 * we grab the lock back in setattr_raw to avoid races. */
void ll_truncate(struct inode *inode)
{
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
        struct obdo oa;
        int rc;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
               inode->i_generation, inode);

        if (!lsm) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                EXIT;
                return;
        }

        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE|OBD_MD_FLMODE|OBD_MD_FLATIME|
                                    OBD_MD_FLMTIME | OBD_MD_FLCTIME);

        CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
               oa.o_id, inode->i_size);

        /* truncate == punch from new size to absolute end of file */
        /* NB: obd_punch must be called with i_sem held!  It updates the kms! */
        rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
                       OBD_OBJECT_EOF, NULL);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                                          OBD_MD_FLATIME | OBD_MD_FLMTIME |
                                          OBD_MD_FLCTIME);

        EXIT;
        return;
} /* ll_truncate */

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
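/* prepare_write: get a page ready to receive data from write(2).  A
 * full-page overwrite needs no read, a partial write past the known
 * minimum size (kms) starts from a zeroed page, and anything else reads
 * the old contents in synchronously first. */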
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        struct brw_page pga;
        struct obdo oa;
        __u64 kms;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = PAGE_SIZE;
        pga.flag = 0;

        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_mode = inode->i_mode;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE
                                | OBD_MD_FLTYPE | OBD_MD_FLGROUP;

        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,
                     NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page))
                RETURN(0);

        /* We're completely overwriting an existing page, so _don't_ mark
         * it Uptodate until commit_write */
        if (from == 0 && to == PAGE_SIZE) {
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        kms = lov_merge_size(lsm, 1);
        if (kms <= offset) {
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}

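/* Recover our ll_async_page from the opaque cookie handed to the async
 * io callbacks, validating the magic before trusting it. */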
struct ll_async_page *llap_from_cookie(void *cookie)
{
        struct ll_async_page *llap = cookie;
        if (llap->llap_magic != LLAP_MAGIC)
                return ERR_PTR(-EINVAL);
        return llap;
}

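/* Async io callback: the io layer wants to send this page in a write
 * RPC.  Trylock the page (coming back later via -EAGAIN if it's busy)
 * and clear its dirty bit so we don't see a redundant writepage. */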
static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(-EINVAL);

        page = llap->llap_page;

        LASSERT(cmd != OBD_BRW_READ);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list lock so if
         * we took the page cache list lock we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
        clear_page_dirty(page);
        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        page = llap->llap_page;
        lsm = ll_i2info(page->mapping->host)->lli_smd;
        kms = lov_merge_size(lsm, 1);

        /* catch race with truncate */
        if (((__u64)page->index << PAGE_SHIFT) >= kms)
                RETURN(0);

        /* catch sub-page write at end of file */
        if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
                RETURN(kms % PAGE_SIZE);

        RETURN(PAGE_SIZE);
}

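/* Fill an obdo with the inode's object id and attributes for a brw RPC;
 * writes also carry the inode's fid and io epoch. */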
void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd == OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
                mdc_pack_fid(obdo_fid(oa), inode->i_ino, 0, inode->i_mode);
                obdo_fid(oa)->mds = ll_i2info(inode)->lli_mds;
                oa->o_easize = ll_i2info(inode)->lli_io_epoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
        EXIT;
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_completion =        ll_ap_completion,
};

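/* page->private holds our llap once llap_from_page has attached one;
 * return it (or NULL), asserting the magic hasn't been trampled. */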
struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page->private;

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page->private, llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* XXX have the exp be an argument? */
struct ll_async_page *llap_from_page(struct page *page)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        llap = llap_cast_private(page);
        if (llap != NULL)
                RETURN(llap);

        exp = ll_i2obdexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        OBD_ALLOC(llap, sizeof(*llap));
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_FREE(llap, sizeof(*llap));
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        page->private = (unsigned long)llap;
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        list_add_tail(&llap->llap_proc_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

        RETURN(llap);
}

void lov_increase_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
                      obd_off size);
/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        /* grab the export up front: the out: path below passes it to
         * lov_increase_kms even when the page was already dirty */
        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_MISSES);

                /* _make_ready only sees llap once we've unlocked the page */
                llap->llap_write_queued = 1;
                rc = obd_queue_async_io(exp, lsm, NULL, llap->llap_cookie,
                                        OBD_BRW_WRITE, 0, 0, 0, 0);
                if (rc != 0) { /* async failed, try sync.. */
                        struct obd_io_group *oig;
                        rc = oig_init(&oig);
                        if (rc)
                                GOTO(out, rc);

                        llap->llap_write_queued = 0;
                        rc = obd_queue_group_io(exp, lsm, NULL, oig,
                                                llap->llap_cookie,
                                                OBD_BRW_WRITE, 0, to, 0,
                                                ASYNC_READY | ASYNC_URGENT |
                                                ASYNC_COUNT_STABLE |
                                                ASYNC_GROUP_SYNC);

                        if (rc)
                                GOTO(free_oig, rc);

                        rc = obd_trigger_group_io(exp, lsm, NULL, oig);
                        if (rc)
                                GOTO(free_oig, rc);

                        rc = oig_wait(oig);
free_oig:
                        oig_release(oig);
                        GOTO(out, rc);
                }
                LL_CDEBUG_PAGE(D_PAGE, page, "write queued\n");
                //llap_write_pending(inode, llap);
        } else {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_HITS);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap */
        set_page_dirty(page);

out:
        if (rc == 0) {
                size = (((obd_off)page->index) << PAGE_SHIFT) + to;
                lov_increase_kms(exp, lsm, size);
                if (size > inode->i_size)
                        inode->i_size = size;
                SetPageUptodate(page);
        }
        RETURN(rc);
}

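/* Reserve up to 'len' pages from the superblock-wide read-ahead budget;
 * whatever is granted must eventually be returned via ll_ra_count_put(). */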
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(sbi->ll_max_read_ahead_pages - sbi->ll_read_ahead_pages,
                  len);
        sbi->ll_read_ahead_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        spin_lock(&sbi->ll_lock);
        LASSERTF(sbi->ll_read_ahead_pages >= len, "r_a_p %lu len %lu\n",
                 sbi->ll_read_ahead_pages, len);
        sbi->ll_read_ahead_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

/* called for each page in a completed rpc. */
void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        page = llap->llap_page;
        LASSERT(PageLocked(page));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd == OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd == OBD_BRW_READ)
                        llap->llap_defer_uptodate = 0;
                SetPageError(page);
        }

        unlock_page(page);

        if (0 && cmd == OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        page_cache_release(page);
        EXIT;
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page->private == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2obdexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback..*/
        page->private = 0;

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_proc_item))
                list_del_init(&llap->llap_proc_item);
        sbi->ll_pglist_gen++;
        spin_unlock(&sbi->ll_lock);
        OBD_FREE(llap, sizeof(*llap));
        EXIT;
}

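/* Does a dlm extent lock (or the file's group lock) already cover this
 * page?  LDLM_FL_TEST_LOCK only peeks: no reference is taken on a
 * matched lock. */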
static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (fd_flags & LL_FILE_GROUP_LOCKED)
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
        flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
        matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}

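/* Queue one page for read as part of the group io 'oig'.  With 'defer'
 * set, completion skips SetPageUptodate and readpage sets it later when
 * it finds llap_defer_uptodate. */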
static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;
        ENTRY;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY
                                              | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
               ras->ras_last_readpage, ras->ras_consecutive,            \
               ras->ras_window_start, ras->ras_window_len,              \
               ras->ras_next_readahead);

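/* Walk from ras_next_readahead to the end of the window (clipped to
 * kms), grabbing pages that aren't already locked and queueing deferred
 * reads for those covered by a dlm lock.  Returns the number of reads
 * queued. */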
static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0;
        __u64 kms;
        ENTRY;

        kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
        if (kms == 0)
                RETURN(0);

        spin_lock(&ras->ras_lock);

        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
                end = min(end, (unsigned long)(kms >> PAGE_CACHE_SHIFT));
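                /* effectively end + 1; the max() presumably guards
                 * against end + 1 wrapping back to 0 */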
                ras->ras_next_readahead = max(end, end + 1);

                RAS_CDEBUG(ras);
        }

        spin_unlock(&ras->ras_lock);

        if (end == 0)
                RETURN(0);

        reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);

        for (i = start; reserved > 0 && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait(mapping, i);
                if (page == NULL) {
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        i = end;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        /* reserved is unsigned, so a bare >= 0 check would be vacuous */
        LASSERTF((long)reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(mapping->host), reserved);
        RETURN(ret);
}

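/* Round the window start down to a PTLRPC_MAX_BRW_PAGES boundary and
 * keep next_readahead from falling behind it. */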
static void ras_set_start(struct ll_readahead_state *ras,
                          unsigned long index)
{
        ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);
}

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras,
                      unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive = 1;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = ras->ras_window_start;

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
}

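/* Feed one readpage index into the read-ahead state machine: a seek
 * resets the state, a second consecutive page opens an rpc-sized
 * window, and the window grows again half-way through each rpc's worth
 * of pages, capped at 'max'. */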
static void ras_update(struct ll_readahead_state *ras,
                       unsigned long index, unsigned long max)
{
        ENTRY;

        spin_lock(&ras->ras_lock);

        if (index != ras->ras_last_readpage + 1) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive++;
        ras_set_start(ras, index);

        if (ras->ras_consecutive == 2) {
                ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
                GOTO(out_unlock, 0);
        }

        /* we need to increase the window sometimes.  we'll arbitrarily
         * do it half-way through the pages in an rpc */
        if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
            (PTLRPC_MAX_BRW_PAGES >> 1)) {
                ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
                ras->ras_window_len = min(ras->ras_window_len, max);
        }

        EXIT;
out_unlock:
        RAS_CDEBUG(ras);
        spin_unlock(&ras->ras_lock);
        return;
}

/*
 * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
               inode->i_ino, inode->i_generation, inode,
               (((obd_off)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ras_update(&fd->fd_ras, page->index,
                           ll_i2sbi(inode)->ll_max_read_ahead_pages);

        if (llap->llap_defer_uptodate) {
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        rc = ll_page_matches(page, fd->fd_flags);
        if (rc < 0) {
                LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
                GOTO(out, rc);
        }

        if (rc == 0) {
                static unsigned long next_print;
                CDEBUG(D_INODE, "ino %lu page %lu (%llu) didn't match a lock\n",
                       inode->i_ino, page->index,
                       (long long)page->index << PAGE_CACHE_SHIFT);
                if (time_after(jiffies, next_print)) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                              "a lock (mmap?).  check debug logs.\n",
                              inode->i_ino, page->index,
                              (long long)page->index << PAGE_CACHE_SHIFT);
                        next_print = jiffies + 30 * HZ;
                }
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        RETURN(rc);
}