lustre/llite/rw.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <asm/segment.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev )
#endif

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct timeval start;
        struct brw_page pg;
        int rc;
        ENTRY;

        do_gettimeofday(&start);

        pg.pg = page;
        pg.disk_offset = pg.page_offset = ((obd_off)page->index) << PAGE_SHIFT;

        if (cmd == OBD_BRW_WRITE &&
            (pg.disk_offset + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
        else
                pg.count = PAGE_SIZE;

        CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
               cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
               pg.disk_offset, pg.disk_offset);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, inode->i_size,
                       page->mapping->host, page->mapping->host->i_size,
                       page->index, pg.disk_offset);
        }

        pg.flag = flags;

        if (cmd == OBD_BRW_WRITE)
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_WRITE, pg.count);
        else
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_READ, pg.count);
        rc = obd_brw(cmd, ll_i2dtexp(inode), oa, lsm, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        ll_stime_record(ll_i2sbi(inode), &start,
                        &ll_i2sbi(inode)->ll_brw_stime);
        RETURN(rc);
}
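
/*
 * Worked example for the pg.count trimming above (editor's note,
 * assuming a 4096-byte PAGE_SIZE): with i_size = 6000, a write of
 * page 1 (file bytes 4096..8191) satisfies
 * disk_offset + PAGE_SIZE > i_size, so only i_size % PAGE_SIZE = 1904
 * bytes are sent; page 0 goes out in full.  If that branch is taken
 * with i_size exactly on a page boundary the modulo yields 0, which is
 * the situation the ZERO COUNT message above is flagging.
 */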

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);

/*
 * This isn't where truncate starts; roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.
 * We grab the lock back in setattr_raw to avoid races.
 *
 * Must be called with lli_size_sem held.
 */
void ll_truncate(struct inode *inode)
{
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obdo *oa = NULL;
        int rc;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %llu\n", inode->i_ino,
               inode->i_generation, inode, inode->i_size);

        if (!lsm) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        if (lov_merge_size(lsm, 0) == inode->i_size) {
                CDEBUG(D_VFSTRACE, "skipping punch for "LPX64" (size = %llu)\n",
                       lsm->lsm_object_id, inode->i_size);
                GOTO(out_unlock, 0);
        }

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %llu)\n",
               lsm->lsm_object_id, inode->i_size);

        oa = obdo_alloc();
        if (oa == NULL) {
                CERROR("cannot alloc oa, error %d\n", -ENOMEM);
                EXIT;
                return;
        }

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);

        obd_adjust_kms(ll_i2dtexp(inode), lsm, inode->i_size, 1);

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
        up(&lli->lli_size_sem);

        rc = obd_punch(ll_i2dtexp(inode), oa, lsm, inode->i_size,
                       OBD_OBJECT_EOF, NULL);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                              OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);

        obdo_free(oa);

        EXIT;
        return;

out_unlock:
        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
        up(&lli->lli_size_sem);
} /* ll_truncate */
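
/*
 * Editor's note on the ordering above: lov_merge_size(lsm, 0) is the
 * stripe-merged size the client already believes the objects have, so
 * a truncate to the current size skips the OST punch entirely.  When a
 * punch is needed, the kms is pulled down via obd_adjust_kms() *before*
 * lli_size_sem is dropped, so a racing ll_ap_refresh_count() (below)
 * cannot queue writes past the new end of file while the punch itself
 * runs outside the semaphore.
 */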

int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        struct obdo *oa = NULL;
        struct brw_page pga;
        __u64 kms;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.disk_offset = pga.page_offset = offset;
        pga.count = PAGE_SIZE;
        pga.flag = 0;

        oa = obdo_alloc();
        if (oa == NULL)
                RETURN(-ENOMEM);

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_mode = inode->i_mode;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                OBD_MD_FLTYPE | OBD_MD_FLGROUP;

        rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), oa, lsm,
                     1, &pga, NULL);
        if (rc)
                GOTO(out_free_oa, rc);

        if (PageUptodate(page))
                GOTO(out_free_oa, 0);

        /* We're completely overwriting an existing page, so _don't_ set it
         * uptodate until commit_write */
        if (from == 0 && to == PAGE_SIZE) {
                POISON_PAGE(page, 0x11);
                GOTO(out_free_oa, 0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        down(&lli->lli_size_sem);
        kms = lov_merge_size(lsm, 1);
        up(&lli->lli_size_sem);
        if (kms <= offset) {
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa->o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, oa, oa->o_valid);
        }

        EXIT;
prepare_done:
        if (rc == 0)
                SetPageUptodate(page);
out_free_oa:
        obdo_free(oa);
        return rc;
}
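
/*
 * Worked example for the kms check above (editor's note, assuming
 * 4096-byte pages): with kms = 5000, a partial write into page 2
 * (offset 8192) finds kms <= offset, so the page is simply zero-filled
 * locally -- no byte of it can exist on the OSTs yet.  A partial write
 * into page 1 (offset 4096) falls through to the synchronous ll_brw()
 * read to fetch the bytes the caller is not overwriting.
 */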

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERT(cmd != OBD_BRW_READ);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list lock held,
         * so if we took the page cache list lock we'd create a lock
         * inversion with the removepage path, which takes the page lock
         * and then the cli lock */
        clear_page_dirty(page);
        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        lsm = ll_i2info(page->mapping->host)->lli_smd;
        kms = lov_merge_size(lsm, 1);

        /* catch race with truncate */
        if (((__u64)page->index << PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
                return kms % PAGE_SIZE;

        return PAGE_SIZE;
}
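
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the kms-to-count mapping above, restated with plain types and
 * an assumed 4096-byte page.  With kms = 10000, pages 0 and 1 write the
 * full 4096 bytes, page 2 is trimmed to 10000 % 4096 = 1808 bytes, and
 * page 3 onward returns 0 because a racing truncate already pulled the
 * kms below it.
 */
#if 0 /* example only, never compiled */
static unsigned long example_refresh_count(unsigned long index,
                                           unsigned long long kms)
{
        unsigned long long offset = (unsigned long long)index << 12;

        if (offset >= kms)              /* raced with truncate: skip write */
                return 0;
        if (offset + 4096 > kms)        /* sub-page tail at end of file */
                return (unsigned long)(kms % 4096);
        return 4096;                    /* page fully below kms */
}
#endif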

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_valid valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd == OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
                mdc_pack_id(obdo_id(oa), inode->i_ino, 0, inode->i_mode,
                            id_group(&ll_i2info(inode)->lli_id),
                            id_fid(&ll_i2info(inode)->lli_id));

                oa->o_easize = ll_i2info(inode)->lli_io_epoch;
                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
        EXIT;
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_completion =        ll_ap_completion,
};
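
/*
 * Editor's note: this ops table is handed to obd_prep_async_page() in
 * llap_from_page() below, so the layers building the actual BRW RPCs
 * call back into llite -- ap_make_ready/ap_refresh_count while
 * assembling a write RPC, ap_fill_obdo to stamp the obdo, and
 * ap_completion (ll_ap_completion below) once the RPC finishes.
 */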

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page->private;

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page->private, llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* XXX have the exp be an argument? */
struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL)
                GOTO(out, llap);

        exp = ll_i2dtexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        OBD_ALLOC(llap, sizeof(*llap));
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_FREE(llap, sizeof(*llap));
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        list_add_tail(&llap->llap_proc_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

out:
        llap->llap_origin = origin;
        RETURN(llap);
}

static int queue_or_sync_write(struct obd_export *exp,
                               struct lov_stripe_md *lsm,
                               struct ll_async_page *llap,
                               unsigned to,
                               obd_flags async_flags)
{
        struct obd_io_group *oig;
        int rc;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, lsm, NULL, llap->llap_cookie,
                                OBD_BRW_WRITE, 0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                //llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        rc = obd_queue_group_io(exp, lsm, NULL, oig, llap->llap_cookie,
                                OBD_BRW_WRITE, 0, to, 0, ASYNC_READY |
                                ASYNC_URGENT | ASYNC_COUNT_STABLE |
                                ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, lsm, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY)
                unlock_page(llap->llap_page);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                       "sync write returned %d\n", rc);

        EXIT;
free_oig:
        oig_release(oig);
out:
        return rc;
}

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp = NULL;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_MISSES);

                rc = queue_or_sync_write(exp, ll_i2info(inode)->lli_smd,
                                         llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_HITS);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * don't dirty the page if it has been written out in
         * queue_or_sync_write() */
        if (llap->llap_write_queued)
                set_page_dirty(page);
        EXIT;
out:
        size = (((obd_off)page->index) << PAGE_SHIFT) + to;
        down(&lli->lli_size_sem);
        if (rc == 0) {
                obd_adjust_kms(exp, lsm, size, 0);
                if (size > inode->i_size)
                        inode->i_size = size;
                SetPageUptodate(page);
        } else if (size > inode->i_size) {
                /* this page is beyond the pale of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking.  we must
                 * tear down our book-keeping here. */
                ll_removepage(page);
        }
        up(&lli->lli_size_sem);
        return rc;
}
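
/*
 * Worked example for the size update above (editor's note, assuming
 * 4096-byte pages): committing bytes 0..99 of page 3 gives
 * size = (3 << 12) + 100 = 12388.  The kms is updated first, so a
 * racing ll_ap_refresh_count() sees the new extent; i_size is then
 * extended only if the write actually grew the file.
 */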

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(!PageDirty(page));
        LASSERT(PageLocked(page));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, ll_i2info(inode)->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, ll_i2info(inode)->lli_smd, llap,
                                         PAGE_SIZE, ASYNC_READY |
                                         ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
        EXIT;
out:
        if (rc)
                unlock_page(page);
        return rc;
}

/* called for each page in a completed rpc. */
void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0)  {
                if (cmd == OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd == OBD_BRW_READ)
                        llap->llap_defer_uptodate = 0;
                SetPageError(page);
        }

        unlock_page(page);

        if (0 && cmd == OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);
        EXIT;
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page->private == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, 0);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback..*/
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_proc_item))
                list_del_init(&llap->llap_proc_item);
        sbi->ll_pglist_gen++;
        spin_unlock(&sbi->ll_lock);
        OBD_FREE(llap, sizeof(*llap));
        EXIT;
}

static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (fd_flags & LL_FILE_GROUP_LOCKED)
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
        flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
        matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY
                                              | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct page *page, struct address_space *mapping)
{
        struct ll_async_page *llap;

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                return;

        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
               ras->ras_last_readpage, ras->ras_consecutive,            \
               ras->ras_window_start, ras->ras_window_len,              \
               ras->ras_next_readahead);

static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
               start = 0;
        if (end < point)
               end = ~0;

        return start <= index && index <= end;
}
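
/*
 * Editor's note: the two fixups above clamp unsigned wrap-around.  For
 * example, index_in_window(3, 5, 8, 8) computes start = 5 - 8, which
 * wraps to a huge unsigned value; start > point detects that and clamps
 * start to 0, so the window becomes [0, 13] and index 3 is inside it.
 * Likewise end = point + after clamps to ~0 on overflow near ULONG_MAX.
 */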

static int ll_readahead(struct ll_readahead_state *ras,
                         struct obd_export *exp, struct address_space *mapping,
                         struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        ENTRY;

        kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }
        spin_lock(&ras->ras_lock);

        /* reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
                end = min(end, (unsigned long)(kms >> PAGE_CACHE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);

                RAS_CDEBUG(ras);
        }

        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait(mapping, i);
                if (page == NULL) {
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(mapping->host), reserved);

        if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}
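
/*
 * Worked example for the reservation above (editor's note): with
 * ras_next_readahead = 10, ras_window_start = 8 and ras_window_len = 32,
 * the candidate region is [10, 39], clipped against the kms page.  The
 * loop then asks ll_ra_count_get() for 30 page slots; if only 20 are
 * available, at most 20 reads are issued and RA_STAT_MAX_IN_FLIGHT is
 * bumped.  Any slots still unissued when the loop stops early (lock
 * miss, EOF) are returned via ll_ra_count_put().
 */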

static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
}
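
/*
 * Editor's note: PTLRPC_MAX_BRW_PAGES is a power of two, so the mask
 * above rounds the window start down to an RPC-sized boundary.  For
 * example, if PTLRPC_MAX_BRW_PAGES were 256, index 1000 would give
 * 1000 & ~255 = 768.
 */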

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive = 1;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = ras->ras_window_start;

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
}

static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
                       unsigned long index, unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First, when the app
         * seeks or reads to some other part of the file.  Secondly, if we
         * get a read-ahead miss on a page we think we've previously issued.
         * This can be a symptom of there being so many read-ahead pages
         * that the VM is reclaiming them before we get to them. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* wait for a few pages to arrive before issuing readahead to avoid
         * the worst overutilization */
        if (ras->ras_consecutive == 3) {
                ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
                GOTO(out_unlock, 0);
        }

        /* we need to increase the window sometimes.  we'll arbitrarily
         * do it half-way through the pages in an rpc */
        if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
            (PTLRPC_MAX_BRW_PAGES >> 1)) {
                ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
                ras->ras_window_len = min(ras->ras_window_len,
                                          ra->ra_max_pages);
        }

        EXIT;
out_unlock:
        RAS_CDEBUG(ras);
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
}
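
/*
 * Worked example of the window logic above (editor's note, assuming
 * PTLRPC_MAX_BRW_PAGES = 256): the first two sequential reads only bump
 * ras_consecutive; the third opens a 256-page window.  From then on,
 * each time the reader crosses the half-RPC offset within an RPC-sized
 * chunk (index % 256 == 128) the window grows by another 256 pages,
 * capped at ra_max_pages.  A seek outside the +/-8 page window, or a
 * miss on a page we think we already issued, resets everything via
 * ras_reset().
 */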

/*
 * For now we do our readpage the same on both 2.4 and 2.6.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  The main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
               inode->i_ino, inode->i_generation, inode,
               (((obd_off)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        rc = ll_page_matches(page, fd->fd_flags);
        if (rc < 0) {
                LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
                GOTO(out, rc);
        }

        if (rc == 0) {
                CWARN("ino %lu page %lu (%llu) not covered by "
                      "a lock (mmap?).  check debug logs.\n",
                      inode->i_ino, page->index,
                      (long long)page->index << PAGE_CACHE_SHIFT);
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        EXIT;
out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        return rc;
}
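
/*
 * Editor's summary of the two readpage paths above: when read-ahead
 * previously filled this page (llap_defer_uptodate), it is marked
 * uptodate immediately and we only issue more read-ahead; otherwise the
 * page read is queued into the group (after checking dlm lock
 * coverage), further read-ahead pages are batched into the same oig,
 * and obd_trigger_group_io() sends everything in one shot.
 */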