/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <asm/segment.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev )
#endif

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct brw_page pg;
        int rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << PAGE_SHIFT;

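        /* a write that ends past i_size only sends the bytes below EOF;
         * the check below reports the degenerate zero-count case */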
        if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
        else
                pg.count = PAGE_SIZE;

        CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
               cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
               pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n",
                       inode->i_ino, inode, inode->i_size, page->mapping->host,
                       page->mapping->host->i_size, page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd == OBD_BRW_WRITE)
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_WRITE, pg.count);
        else
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_READ, pg.count);
        rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}

/* This isn't where truncate starts.  Roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.
 * We grab the lock back in setattr_raw to avoid races. */
void ll_truncate(struct inode *inode)
{
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
        struct obdo oa;
        int rc;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
               inode->i_generation, inode);

        if (!lsm) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                EXIT;
                return;
        }

        oa.o_id = lsm->lsm_object_id;
        oa.o_valid = OBD_MD_FLID;
        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE|OBD_MD_FLMODE|OBD_MD_FLATIME|
                                    OBD_MD_FLMTIME | OBD_MD_FLCTIME);

        CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
               oa.o_id, inode->i_size);

        /* truncate == punch from new size to absolute end of file */
        /* NB: obd_punch must be called with i_sem held!  It updates the kms! */
        rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
                       OBD_OBJECT_EOF, NULL);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                                          OBD_MD_FLATIME | OBD_MD_FLMTIME |
                                          OBD_MD_FLCTIME);

        EXIT;
        return;
} /* ll_truncate */

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        struct brw_page pga;
        struct obdo oa;
        __u64 kms;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = PAGE_SIZE;
        pga.flag = 0;

        oa.o_id = lsm->lsm_object_id;
        oa.o_mode = inode->i_mode;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;

        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,
                     NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page))
                RETURN(0);

        /* We're completely overwriting an existing page, so _don't_ set it
         * uptodate until commit_write */
        if (from == 0 && to == PAGE_SIZE) {
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        kms = lov_merge_size(lsm, 1);
        if (kms <= offset) {
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}

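/* recover the ll_async_page from the opaque cookie we handed to the obd
 * layer; the magic check catches cookies that aren't ours */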
struct ll_async_page *llap_from_cookie(void *cookie)
{
        struct ll_async_page *llap = cookie;
        if (llap->llap_magic != LLAP_MAGIC)
                return ERR_PTR(-EINVAL);
        return llap;
}

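/* called by the obd layer as it builds a write RPC: lock the page and
 * clear its dirty bit so it won't be picked up for another write while
 * this RPC is in flight */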
static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(-EINVAL);

        page = llap->llap_page;

        LASSERT(cmd != OBD_BRW_READ);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli lock so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
        clear_page_dirty(page);
        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page requiring us
 *    to write more from the page cache.
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        page = llap->llap_page;
        lsm = ll_i2info(page->mapping->host)->lli_smd;
        kms = lov_merge_size(lsm, 1);

        /* catch race with truncate */
        if (((__u64)page->index << PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
                return kms % PAGE_SIZE;

        return PAGE_SIZE;
}

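/* fill an obdo with the object id and the inode attributes the OST needs
 * for this I/O; writes also carry the fid and I/O epoch for recovery */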
void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_valid = OBD_MD_FLID;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd == OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
                mdc_pack_fid(obdo_fid(oa), inode->i_ino, 0, inode->i_mode);
                oa->o_easize = ll_i2info(inode)->lli_io_epoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
        EXIT;
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_completion =        ll_ap_completion,
};

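/* page->private either holds our llap or is unused; assert that nothing
 * else has stashed a pointer there */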
struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page->private;

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page->private, llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* XXX have the exp be an argument? */
struct ll_async_page *llap_from_page(struct page *page)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        llap = llap_cast_private(page);
        if (llap != NULL)
                RETURN(llap);

        exp = ll_i2obdexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        OBD_ALLOC(llap, sizeof(*llap));
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_FREE(llap, sizeof(*llap));
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        page->private = (unsigned long)llap;
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        list_add_tail(&llap->llap_proc_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

        RETURN(llap);
}

void lov_increase_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
                      obd_off size);
/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        /* grab the export up front; the out: path below needs it for
         * lov_increase_kms even when the page was already dirty */
        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_MISSES);

                /* _make_ready only sees llap once we've unlocked the page */
                llap->llap_write_queued = 1;
                rc = obd_queue_async_io(exp, lsm, NULL, llap->llap_cookie,
                                        OBD_BRW_WRITE, 0, 0, 0, 0);
                if (rc != 0) { /* async failed, try sync.. */
                        struct obd_io_group *oig;
                        rc = oig_init(&oig);
                        if (rc)
                                GOTO(out, rc);

                        llap->llap_write_queued = 0;
                        rc = obd_queue_group_io(exp, lsm, NULL, oig,
                                                llap->llap_cookie,
                                                OBD_BRW_WRITE, 0, to, 0,
                                                ASYNC_READY | ASYNC_URGENT |
                                                ASYNC_COUNT_STABLE |
                                                ASYNC_GROUP_SYNC);

                        if (rc)
                                GOTO(free_oig, rc);

                        rc = obd_trigger_group_io(exp, lsm, NULL, oig);
                        if (rc)
                                GOTO(free_oig, rc);

                        rc = oig_wait(oig);
free_oig:
                        oig_release(oig);
                        GOTO(out, rc);
                }
                LL_CDEBUG_PAGE(D_PAGE, page, "write queued\n");
                //llap_write_pending(inode, llap);
        } else {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_HITS);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap */
        set_page_dirty(page);

out:
        if (rc == 0) {
                size = (((obd_off)page->index) << PAGE_SHIFT) + to;
                lov_increase_kms(exp, lsm, size);
                if (size > inode->i_size)
                        inode->i_size = size;
                SetPageUptodate(page);
        }
        RETURN(rc);
}

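/* reserve read-ahead pages against the per-superblock limit; unused
 * reservations are returned with ll_ra_count_put */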
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(sbi->ll_max_read_ahead_pages - sbi->ll_read_ahead_pages,
                  len);
        sbi->ll_read_ahead_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        spin_lock(&sbi->ll_lock);
        LASSERTF(sbi->ll_read_ahead_pages >= len, "r_a_p %lu len %lu\n",
                 sbi->ll_read_ahead_pages, len);
        sbi->ll_read_ahead_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

/* called for each page in a completed rpc. */
void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        page = llap->llap_page;
        LASSERT(PageLocked(page));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd == OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd == OBD_BRW_READ)
                        llap->llap_defer_uptodate = 0;
                SetPageError(page);
        }

        unlock_page(page);

        if (0 && cmd == OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        page_cache_release(page);
        EXIT;
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page->private == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2obdexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback.. */
        page->private = 0;

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_proc_item))
                list_del_init(&llap->llap_proc_item);
        sbi->ll_pglist_gen++;
        spin_unlock(&sbi->ll_lock);
        OBD_FREE(llap, sizeof(*llap));
        EXIT;
}

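/* check that this page's extent is still covered by a granted dlm lock
 * (or by the file's group lock) before issuing I/O against it */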
static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (fd_flags & LL_FILE_GROUP_LOCKED)
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
        flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
        matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}

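/* queue a single page as part of a group read; the page reference taken
 * here is dropped by ll_ap_completion, or here if queueing fails */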
static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;
        ENTRY;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY
                                              | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
               ras->ras_last_readpage, ras->ras_consecutive,            \
               ras->ras_window_start, ras->ras_window_len,              \
               ras->ras_next_readahead);

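/* walk the pages in the current read-ahead window, queueing reads for any
 * that are absent, not yet uptodate, and still covered by a dlm lock;
 * returns the number of reads issued */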
static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0;
        __u64 kms;
        ENTRY;

        kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
        if (kms == 0)
                RETURN(0);

        spin_lock(&ras->ras_lock);

        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
                end = min(end, (unsigned long)(kms >> PAGE_CACHE_SHIFT));
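                /* this is end + 1, with max() apparently guarding the case
                 * where end + 1 would wrap to 0 */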
                ras->ras_next_readahead = max(end, end + 1);

                RAS_CDEBUG(ras);
        }

        spin_unlock(&ras->ras_lock);

        if (end == 0)
                RETURN(0);

        reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);

        for (i = start; reserved > 0 && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait(mapping, i);
                if (page == NULL) {
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        i = end;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(mapping->host), reserved);
        RETURN(ret);
}

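/* align the window start down to an rpc-sized boundary so that read-ahead
 * requests tend to build full-sized RPCs */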
static void ras_set_start(struct ll_readahead_state *ras,
                          unsigned long index)
{
        ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);
}

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras,
                      unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive = 1;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = ras->ras_window_start;

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
}

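/* update the read-ahead state for a readpage at 'index': any seek resets
 * the window, the second consecutive page opens it, and it then grows by
 * an rpc's worth of pages half-way through each rpc-sized chunk */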
static void ras_update(struct ll_readahead_state *ras,
                       unsigned long index, unsigned long max)
{
        ENTRY;

        spin_lock(&ras->ras_lock);

        if (index != ras->ras_last_readpage + 1) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive++;
        ras_set_start(ras, index);

        if (ras->ras_consecutive == 2) {
                ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
                GOTO(out_unlock, 0);
        }

        /* we need to increase the window sometimes.  we'll arbitrarily
         * do it half-way through the pages in an rpc */
        if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
            (PTLRPC_MAX_BRW_PAGES >> 1)) {
                ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
                ras->ras_window_len = min(ras->ras_window_len, max);
        }

        EXIT;
out_unlock:
        RAS_CDEBUG(ras);
        spin_unlock(&ras->ras_lock);
        return;
}

/*
 * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
               inode->i_ino, inode->i_generation, inode,
               (((obd_off)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ras_update(&fd->fd_ras, page->index,
                           ll_i2sbi(inode)->ll_max_read_ahead_pages);

        if (llap->llap_defer_uptodate) {
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        rc = ll_page_matches(page, fd->fd_flags);
        if (rc < 0) {
                LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
                GOTO(out, rc);
        }

        if (rc == 0) {
                static unsigned long next_print;
                CDEBUG(D_INODE, "ino %lu page %lu (%llu) didn't match a lock\n",
                       inode->i_ino, page->index,
                       (long long)page->index << PAGE_CACHE_SHIFT);
                if (time_after(jiffies, next_print)) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                               "a lock (mmap?).  check debug logs.\n",
                               inode->i_ino, page->index,
                               (long long)page->index << PAGE_CACHE_SHIFT);
                        next_print = jiffies + 30 * HZ;
                }
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        RETURN(rc);
}