1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Lite I/O page cache routines shared by different kernel revs
5  *
6  *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #include <linux/config.h>
25 #include <linux/kernel.h>
26 #include <linux/mm.h>
27 #include <linux/string.h>
28 #include <linux/stat.h>
29 #include <linux/errno.h>
30 #include <linux/smp_lock.h>
31 #include <linux/unistd.h>
32 #include <linux/version.h>
33 #include <asm/system.h>
34 #include <asm/uaccess.h>
35
36 #include <linux/fs.h>
37 #include <linux/stat.h>
38 #include <asm/uaccess.h>
39 #include <asm/segment.h>
40 #include <linux/mm.h>
41 #include <linux/pagemap.h>
42 #include <linux/smp_lock.h>
43
44 #define DEBUG_SUBSYSTEM S_LLITE
45
46 #include <linux/lustre_mds.h>
47 #include <linux/lustre_lite.h>
48 #include "llite_internal.h"
49 #include <linux/lustre_compat25.h>
50
51 #ifndef list_for_each_prev_safe
52 #define list_for_each_prev_safe(pos, n, head) \
53         for (pos = (head)->prev, n = pos->prev; pos != (head); \
54                 pos = n, n = pos->prev )
55 #endif
56
57 kmem_cache_t *ll_async_page_slab = NULL;
58 size_t ll_async_page_slab_size = 0;
59
60 /* SYNCHRONOUS I/O to object storage for an inode */
61 static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
62                   struct page *page, int flags)
63 {
64         struct ll_inode_info *lli = ll_i2info(inode);
65         struct lov_stripe_md *lsm = lli->lli_smd;
66         struct brw_page pg;
67         int rc;
68         ENTRY;
69
70         pg.pg = page;
71         pg.off = ((obd_off)page->index) << PAGE_SHIFT;
72
73         if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
74                 pg.count = inode->i_size % PAGE_SIZE;
75         else
76                 pg.count = PAGE_SIZE;
77
78         LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
79                        cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
80                        inode->i_ino, pg.off, pg.off);
81         if (pg.count == 0) {
82                 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
83                        LPU64"\n",
84                        inode->i_ino, inode, inode->i_size, page->mapping->host,
85                        page->mapping->host->i_size, page->index, pg.off);
86         }
87
88         pg.flag = flags;
89
90         if (cmd == OBD_BRW_WRITE)
91                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
92                                     LPROC_LL_BRW_WRITE, pg.count);
93         else
94                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
95                                     LPROC_LL_BRW_READ, pg.count);
96         rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);
97         if (rc == 0)
98                 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
99         else if (rc != -EIO)
100                 CERROR("error from obd_brw: rc = %d\n", rc);
101         RETURN(rc);
102 }
103
104 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
105
106 /* This isn't where truncate starts; roughly:
107  * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.
108  * We grab the lock back in ll_setattr_raw to avoid races.
109  *
110  * Must be called with lli_size_sem held. */
111 void ll_truncate(struct inode *inode)
112 {
113         struct ll_inode_info *lli = ll_i2info(inode);
114         struct lov_stripe_md *lsm = lli->lli_smd;
115         struct obdo oa;
116         int rc;
117         ENTRY;
118         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
119                inode->i_generation, inode, inode->i_size, inode->i_size);
120
121         if (lli->lli_size_pid != current->pid) {
122                 EXIT;
123                 return;
124         }
125
126         if (!lsm) {
127                 CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
128                        inode->i_ino);
129                 GOTO(out_unlock, 0);
130         }
131
132         LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
133
134         /* XXX I'm pretty sure this is a hack to paper over a more fundamental
135          * race condition. */
136         if (lov_merge_size(lsm, 0) == inode->i_size) {
137                 CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
138                        lsm->lsm_object_id, inode->i_size, inode->i_size);
139                 GOTO(out_unlock, 0);
140         }
141
142         if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
143                      (inode->i_size & ~PAGE_MASK))) {
144                 /* If the truncate leaves behind a partial page, update its
145                  * checksum. */
146                 struct page *page = find_get_page(inode->i_mapping,
147                                              inode->i_size >> PAGE_CACHE_SHIFT);
148                 if (page != NULL) {
149                         struct ll_async_page *llap = llap_cast_private(page);
150                         if (llap != NULL) {
151                                 llap->llap_checksum =
152                                         crc32_le(0, kmap(page), PAGE_SIZE);
153                                 kunmap(page);
154                         }
155                         page_cache_release(page);
156                 }
157         }
158
159         CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
160                lsm->lsm_object_id, inode->i_size, inode->i_size);
161
162         oa.o_id = lsm->lsm_object_id;
163         oa.o_valid = OBD_MD_FLID;
164         obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
165                         OBD_MD_FLATIME |OBD_MD_FLMTIME |OBD_MD_FLCTIME);
166
167         obd_adjust_kms(ll_i2obdexp(inode), lsm, inode->i_size, 1);
168
169         lli->lli_size_pid = 0;
170         up(&lli->lli_size_sem);
171
172         rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
173                        OBD_OBJECT_EOF, NULL);
174         if (rc)
175                 CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
176         else
177                 obdo_to_inode(inode, &oa, OBD_MD_FLSIZE|OBD_MD_FLBLOCKS|
178                               OBD_MD_FLATIME | OBD_MD_FLMTIME |
179                               OBD_MD_FLCTIME);
180         EXIT;
181         return;
182
183  out_unlock:
184         lli->lli_size_pid = 0;
185         up(&lli->lli_size_sem);
186 } /* ll_truncate */
187
188 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
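/* Prepare a page for a write of bytes [from, to).  We probe the OSC with
 * OBD_BRW_CHECK so obvious errors are returned early, skip the read-in for
 * pages that are already uptodate, for full-page overwrites, and for pages
 * wholly beyond the known minimum size (kms), and otherwise read the
 * existing page contents in synchronously via ll_brw(). */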
189 int ll_prepare_write(struct file *file, struct page *page, unsigned from,
190                      unsigned to)
191 {
192         struct inode *inode = page->mapping->host;
193         struct ll_inode_info *lli = ll_i2info(inode);
194         struct lov_stripe_md *lsm = lli->lli_smd;
195         obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
196         struct brw_page pga;
197         struct obdo oa;
198         __u64 kms;
199         int rc = 0;
200         ENTRY;
201
202         LASSERT(PageLocked(page));
203         (void)llap_cast_private(page); /* assertion */
204
205         /* Check to see if we should return -EIO right away */
206         pga.pg = page;
207         pga.off = offset;
208         pga.count = PAGE_SIZE;
209         pga.flag = 0;
210
211         oa.o_id = lsm->lsm_object_id;
212         oa.o_mode = inode->i_mode;
213         oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
214
215         rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,
216                      NULL);
217         if (rc)
218                 RETURN(rc);
219
220         if (PageUptodate(page)) {
221                 LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
222                 RETURN(0);
223         }
224
225         /* We're completely overwriting an existing page, so _don't_ set it up
226          * to date until commit_write */
227         if (from == 0 && to == PAGE_SIZE) {
228                 LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
229                 POISON_PAGE(page, 0x11);
230                 RETURN(0);
231         }
232
233         /* If we are writing to a new page, no need to read old data.  The extent
234          * locking will have updated the KMS, and for our purposes here we can
235          * treat it like i_size. */
236         down(&lli->lli_size_sem);
237         kms = lov_merge_size(lsm, 1);
238         up(&lli->lli_size_sem);
239         if (kms <= offset) {
240                 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
241                                kms, offset);
242                 memset(kmap(page), 0, PAGE_SIZE);
243                 kunmap(page);
244                 GOTO(prepare_done, rc = 0);
245         }
246
247         /* XXX could be an async ocp read.. read-ahead? */
248         rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
249         if (rc == 0) {
250                 /* bug 1598: don't clobber blksize */
251                 oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
252                 obdo_refresh_inode(inode, &oa, oa.o_valid);
253         }
254
255         EXIT;
256  prepare_done:
257         if (rc == 0)
258                 SetPageUptodate(page);
259
260         return rc;
261 }
262
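/* Recover the ll_async_page from the opaque cookie handed back by the
 * async io layers; the magic check guards against a stale or corrupted
 * cookie. */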
263 struct ll_async_page *llap_from_cookie(void *cookie)
264 {
265         struct ll_async_page *llap = cookie;
266         if (llap->llap_magic != LLAP_MAGIC)
267                 return ERR_PTR(-EINVAL);
268         return llap;
269 }
270
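/* Called by the lower layers when they want to send a queued dirty page.
 * Lock the page (or ask the caller to retry later with -EAGAIN), take a
 * page reference for the duration of the RPC and clear the dirty bit so
 * the page cache won't try to write the page again behind our back. */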
271 static int ll_ap_make_ready(void *data, int cmd)
272 {
273         struct ll_async_page *llap;
274         struct page *page;
275         ENTRY;
276
277         llap = llap_from_cookie(data);
278         if (IS_ERR(llap))
279                 RETURN(-EINVAL);
280
281         page = llap->llap_page;
282
283         LASSERT(cmd != OBD_BRW_READ);
284
285         /* we're trying to write, but the page is locked.. come back later */
286         if (TryLockPage(page))
287                 RETURN(-EAGAIN);
288
289         LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
290         page_cache_get(page);
291
292         /* if we left PageDirty set we might get another writepage call
293          * in the future.  list walkers are bright enough
294          * to check the dirty bit, so we can leave the page on whatever list
295          * it's on.  XXX also, we're called with the cli lock held, so if
296          * we took the page cache list lock we'd create a lock inversion
297          * with the removepage path, which takes the page lock and then the
298          * cli lock */
299         clear_page_dirty(page);
300         RETURN(0);
301 }
302
303 /* We have two reasons for giving llite the opportunity to change the
304  * write length of a given queued page as it builds the RPC containing
305  * the page:
306  *
307  * 1) Further extending writes may have landed in the page cache
308  *    since a partial write first queued this page requiring us
309  *    to write more from the page cache.  (No further races are possible, since
310  *    by the time this is called, the page is locked.)
311  * 2) We might have raced with truncate and want to avoid performing
312  *    write RPCs that are just going to be thrown away by the
313  *    truncate's punch on the storage targets.
314  *
315  * The kms serves these purposes as it is set at both truncate and extending
316  * writes.
317  */
318 static int ll_ap_refresh_count(void *data, int cmd)
319 {
320         struct ll_inode_info *lli;
321         struct ll_async_page *llap;
322         struct lov_stripe_md *lsm;
323         struct page *page;
324         __u64 kms;
325         ENTRY;
326
327         /* readpage queues with _COUNT_STABLE, shouldn't get here. */
328         LASSERT(cmd != OBD_BRW_READ);
329
330         llap = llap_from_cookie(data);
331         if (IS_ERR(llap))
332                 RETURN(PTR_ERR(llap));
333
334         page = llap->llap_page;
335         lli = ll_i2info(page->mapping->host);
336         lsm = lli->lli_smd;
337
338         //down(&lli->lli_size_sem);
339         kms = lov_merge_size(lsm, 1);
340         //up(&lli->lli_size_sem);
341
342         /* catch race with truncate */
343         if (((__u64)page->index << PAGE_SHIFT) >= kms)
344                 return 0;
345
346         /* catch sub-page write at end of file */
347         if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
348                 return kms % PAGE_SIZE;
349
350         return PAGE_SIZE;
351 }
352
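/* Fill an obdo with the object id and current inode attributes for a brw.
 * Writes also carry the fid, io epoch and ownership (uid/gid) so the
 * server side can account for them. */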
353 void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
354 {
355         struct lov_stripe_md *lsm;
356         obd_flag valid_flags;
357
358         lsm = ll_i2info(inode)->lli_smd;
359
360         oa->o_id = lsm->lsm_object_id;
361         oa->o_valid = OBD_MD_FLID;
362         valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
363         if (cmd == OBD_BRW_WRITE) {
364                 oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
365                 mdc_pack_fid(obdo_fid(oa), inode->i_ino, 0, inode->i_mode);
366                 oa->o_easize = ll_i2info(inode)->lli_io_epoch;
367                 oa->o_uid = inode->i_uid;
368                 oa->o_gid = inode->i_gid;
369
370                 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
371                                OBD_MD_FLUID | OBD_MD_FLGID;
372         }
373
374         obdo_from_inode(oa, inode, valid_flags);
375 }
376
377 static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
378 {
379         struct ll_async_page *llap;
380         ENTRY;
381
382         llap = llap_from_cookie(data);
383         if (IS_ERR(llap)) {
384                 EXIT;
385                 return;
386         }
387
388         ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
389         EXIT;
390 }
391
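/* Hand back the user credentials that were saved on the llap at
 * ll_commit_write() time. */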
392 static void ll_ap_get_ucred(void *data, struct obd_ucred *ouc)
393 {
394         struct ll_async_page *llap;
395
396         llap = llap_from_cookie(data);
397         if (IS_ERR(llap)) {
398                 EXIT;
399                 return;
400         }
401
402         memcpy(ouc, &llap->llap_ouc, sizeof(*ouc));
403         EXIT;
404 }
405
406 static struct obd_async_page_ops ll_async_page_ops = {
407         .ap_make_ready =        ll_ap_make_ready,
408         .ap_refresh_count =     ll_ap_refresh_count,
409         .ap_fill_obdo =         ll_ap_fill_obdo,
410         .ap_completion =        ll_ap_completion,
411         .ap_get_ucred =         ll_ap_get_ucred,
412 };
413
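/* Return the llap attached to a page via page->private, or NULL if the
 * page has none.  A non-NULL private value with the wrong magic trips the
 * assertion. */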
414 struct ll_async_page *llap_cast_private(struct page *page)
415 {
416         struct ll_async_page *llap = (struct ll_async_page *)page->private;
417
418         LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
419                  "page %p private %lu gave magic %d which != %d\n",
420                  page, page->private, llap->llap_magic, LLAP_MAGIC);
421
422         return llap;
423 }
424
425 /* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
426  *
427  * There is an llap attached to every page in lustre, linked off @sbi.
428  * We add a dummy llap to the list so we don't lose our place during the walk.
429  * If llaps in the list are being moved they will only move to the end
430  * of the LRU, and we aren't terribly interested in those pages here (we
431  * start at the beginning of the list, where the least-used llaps are).
432  */
433 int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
434 {
435         struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
436         unsigned long total, want, count = 0;
437
438         total = sbi->ll_async_page_count;
439
440         /* There can be a large number of llaps (600k or more in a large
441          * memory machine) so the VM 1/6 shrink ratio is likely too much.
442          * Since we are freeing pages also, we don't necessarily want to
443          * shrink so much.  Limit to 40MB of pages + llaps per call. */
444         if (shrink_fraction == 0)
445                 want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
446         else
447                 want = (total + shrink_fraction - 1) / shrink_fraction;
448
449         if (want > 40 << (20 - PAGE_CACHE_SHIFT))
450                 want = 40 << (20 - PAGE_CACHE_SHIFT);
451
452         CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
453                want, total, shrink_fraction);
454
455         spin_lock(&sbi->ll_lock);
456         list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
457
458         while (total-- > 0 && count < want) {
459                 struct page *page;
460                 int keep;
461
462                 if (unlikely(need_resched())) {
463                         spin_unlock(&sbi->ll_lock);
464                         cond_resched();
465                         spin_lock(&sbi->ll_lock);
466                 }
467
468                 llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
469                 list_del_init(&dummy_llap.llap_pglist_item);
470                 if (llap == NULL)
471                         break;
472
473                 page = llap->llap_page;
474                 LASSERT(page != NULL);
475
476                 list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
477
478                 /* Page needs/undergoing IO */
479                 if (TryLockPage(page)) {
480                         LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
481                         continue;
482                 }
483
484                 if (llap->llap_write_queued || PageDirty(page) ||
485                     (!PageUptodate(page) &&
486                      llap->llap_origin != LLAP_ORIGIN_READAHEAD))
487                         keep = 1;
488                 else
489                         keep = 0;
490
491                 LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s origin %s\n",
492                                keep ? "keep" : "drop",
493                                llap->llap_write_queued ? "wq " : "",
494                                PageDirty(page) ? "pd " : "",
495                                PageUptodate(page) ? "" : "!pu ",
496                                llap->llap_defer_uptodate ? "" : "!du",
497                                llap_origins[llap->llap_origin]);
498
499                 /* If page is dirty or undergoing IO don't discard it */
500                 if (keep) {
501                         unlock_page(page);
502                         continue;
503                 }
504
505                 page_cache_get(page);
506                 spin_unlock(&sbi->ll_lock);
507
508                 ++count;
509                 if (page->mapping != NULL) {
510                         ll_ra_accounting(page, page->mapping);
511                         ll_truncate_complete_page(page);
512                 }
513                 unlock_page(page);
514                 page_cache_release(page);
515
516                 spin_lock(&sbi->ll_lock);
517         }
518         list_del(&dummy_llap.llap_pglist_item);
519         spin_unlock(&sbi->ll_lock);
520
521         CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
522                count, want, total);
523
524         return count;
525 }
526
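/* Find or create the llap for a page.  An existing llap is simply moved to
 * the tail of the sbi LRU list.  Otherwise we allocate one from the slab,
 * register the page with the lower layers via obd_prep_async_page(), stash
 * the llap in page->private and add it to the LRU, shrinking the per-sb
 * cache first if we are over the async page limit.  With LL_SBI_CHECKSUM
 * set we also maintain a per-page checksum for debugging. */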
527 struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
528 {
529         struct ll_async_page *llap;
530         struct obd_export *exp;
531         struct inode *inode = page->mapping->host;
532         struct ll_sb_info *sbi = ll_i2sbi(inode);
533         int rc;
534         ENTRY;
535
536         LASSERT(ll_async_page_slab);
537         LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
538
539         llap = llap_cast_private(page);
540         if (llap != NULL) {
541                 /* move to end of LRU list */
542                 spin_lock(&sbi->ll_lock);
543                 sbi->ll_pglist_gen++;
544                 list_del_init(&llap->llap_pglist_item);
545                 list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
546                 spin_unlock(&sbi->ll_lock);
547                 GOTO(out, llap);
548         }
549
550         exp = ll_i2obdexp(page->mapping->host);
551         if (exp == NULL)
552                 RETURN(ERR_PTR(-EINVAL));
553
554         /* limit the number of lustre-cached pages */
555         if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
556                 llap_shrink_cache(sbi, 0);
557
558         OBD_SLAB_ALLOC(llap, ll_async_page_slab, SLAB_KERNEL,
559                        ll_async_page_slab_size);
560         if (llap == NULL)
561                 RETURN(ERR_PTR(-ENOMEM));
562         llap->llap_magic = LLAP_MAGIC;
563         llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
564
565         rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
566                                  (obd_off)page->index << PAGE_SHIFT,
567                                  &ll_async_page_ops, llap, &llap->llap_cookie);
568         if (rc) {
569                 OBD_SLAB_FREE(llap, ll_async_page_slab,
570                               ll_async_page_slab_size);
571                 RETURN(ERR_PTR(rc));
572         }
573
574         CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
575                page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
576         /* also zeroing the PRIVBITS low order bitflags */
577         __set_page_ll_data(page, llap);
578         llap->llap_page = page;
579
580         spin_lock(&sbi->ll_lock);
581         sbi->ll_pglist_gen++;
582         sbi->ll_async_page_count++;
583         list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
584         spin_unlock(&sbi->ll_lock);
585
586  out:
587         if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
588                 __u32 csum = 0;
589                 csum = crc32_le(csum, kmap(page), PAGE_SIZE);
590                 kunmap(page);
591                 if (origin == LLAP_ORIGIN_READAHEAD ||
592                     origin == LLAP_ORIGIN_READPAGE) {
593                         llap->llap_checksum = 0;
594                 } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
595                            llap->llap_checksum == 0) {
596                         llap->llap_checksum = csum;
597                         CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
598                 } else if (llap->llap_checksum == csum) {
599                         /* origin == LLAP_ORIGIN_WRITEPAGE */
600                         CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
601                                page, csum);
602                 } else {
603                         /* origin == LLAP_ORIGIN_WRITEPAGE */
604                         LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
605                                        "%x!\n", llap->llap_checksum, csum);
606                 }
607         }
608
609         llap->llap_origin = origin;
610         RETURN(llap);
611 }
612
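/* Try to queue the page for asynchronous write-out.  If the async queue
 * refuses it, fall back to a synchronous group write, widening the
 * transfer to a full page when we are not at EOF (bug 4410) and
 * re-verifying the page checksum before the page leaves llite. */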
613 static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
614                                struct ll_async_page *llap,
615                                unsigned to, obd_flag async_flags)
616 {
617         unsigned long size_index = inode->i_size >> PAGE_SHIFT;
618         struct obd_io_group *oig;
619         struct ll_sb_info *sbi = ll_i2sbi(inode);
620         int rc;
621         ENTRY;
622
623         /* _make_ready only sees llap once we've unlocked the page */
624         llap->llap_write_queued = 1;
625         rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
626                                 llap->llap_cookie, OBD_BRW_WRITE, 0, 0, 0,
627                                 async_flags);
628         if (rc == 0) {
629                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
630                 //llap_write_pending(inode, llap);
631                 GOTO(out, 0);
632         }
633
634         llap->llap_write_queued = 0;
635
636         rc = oig_init(&oig);
637         if (rc)
638                 GOTO(out, rc);
639
640         /* make full-page requests if we are not at EOF (bug 4410) */
641         if (to != PAGE_SIZE && llap->llap_page->index < size_index) {
642                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
643                                "sync write before EOF: size_index %lu, to %d\n",
644                                size_index, to);
645                 to = PAGE_SIZE;
646         } else if (to != PAGE_SIZE && llap->llap_page->index == size_index) {
647                 int size_to = inode->i_size & ~PAGE_MASK;
648                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
649                                "sync write at EOF: size_index %lu, to %d/%d\n",
650                                size_index, to, size_to);
651                 if (to < size_to)
652                         to = size_to;
653         }
654
655         /* compare the checksum once before the page leaves llite */
656         if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
657                      llap->llap_checksum != 0)) {
658                 __u32 csum = 0;
659                 struct page *page = llap->llap_page;
660                 csum = crc32_le(csum, kmap(page), PAGE_SIZE);
661                 kunmap(page);
662                 if (llap->llap_checksum == csum) {
663                         CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
664                                page, csum);
665                 } else {
666                         CERROR("page %p old cksum %x != new cksum %x!\n",
667                                page, llap->llap_checksum, csum);
668                 }
669         }
670
671         rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
672                                 llap->llap_cookie, OBD_BRW_WRITE, 0, to, 0,
673                                 ASYNC_READY | ASYNC_URGENT |
674                                 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
675         if (rc)
676                 GOTO(free_oig, rc);
677
678         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
679         if (rc)
680                 GOTO(free_oig, rc);
681
682         rc = oig_wait(oig);
683
684         if (!rc && async_flags & ASYNC_READY)
685                 unlock_page(llap->llap_page);
686
687         LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
688
689 free_oig:
690         oig_release(oig);
691 out:
692         RETURN(rc);
693 }
694
695 /* update our write count to account for i_size increases that may have
696  * happened since we've queued the page for io. */
697
698 /* be careful not to return success without setting the page Uptodate or
699  * the next pass through prepare_write will read in stale data from disk. */
700 int ll_commit_write(struct file *file, struct page *page, unsigned from,
701                     unsigned to)
702 {
703         struct inode *inode = page->mapping->host;
704         struct ll_inode_info *lli = ll_i2info(inode);
705         struct lov_stripe_md *lsm = lli->lli_smd;
706         struct obd_export *exp;
707         struct ll_async_page *llap;
708         struct ll_uctxt ctxt;
709         loff_t size;
710         int rc = 0;
711         ENTRY;
712
713         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
714         LASSERT(inode == file->f_dentry->d_inode);
715         LASSERT(PageLocked(page));
716
717         CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
718                inode, page, from, to, page->index);
719
720         llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
721         if (IS_ERR(llap))
722                 RETURN(PTR_ERR(llap));
723
724         exp = ll_i2obdexp(inode);
725         if (exp == NULL)
726                 RETURN(-EINVAL);
727
728         /* set user credit information for this page */
729         llap->llap_ouc.ouc_fsuid = current->fsuid;
730         llap->llap_ouc.ouc_fsgid = current->fsgid;
731         llap->llap_ouc.ouc_cap = current->cap_effective;
732         ll_i2uctxt(&ctxt, inode, NULL);
733         llap->llap_ouc.ouc_suppgid1 = ctxt.gid1;
734
735         /* queue a write for some time in the future the first time we
736          * dirty the page */
737         if (!PageDirty(page)) {
738                 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
739                                      LPROC_LL_DIRTY_MISSES);
740
741                 rc = queue_or_sync_write(exp, inode, llap, to, 0);
742                 if (rc)
743                         GOTO(out, rc);
744         } else {
745                 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
746                                      LPROC_LL_DIRTY_HITS);
747         }
748
749         /* Put the page in the page cache; from now on ll_removepage is
750          * responsible for cleaning up the llap.
751          * Only set the page dirty when it's queued to be written out. */
752         if (llap->llap_write_queued)
753                 set_page_dirty(page);
754
755 out:
756         size = (((obd_off)page->index) << PAGE_SHIFT) + to;
757         down(&lli->lli_size_sem);
758         if (rc == 0) {
759                 obd_adjust_kms(exp, lsm, size, 0);
760                 if (size > inode->i_size)
761                         inode->i_size = size;
762                 SetPageUptodate(page);
763         } else if (size > inode->i_size) {
764                 /* this page is beyond the pale of i_size, so it can't be
765                  * truncated in ll_p_r_e during lock revoking.  we must
766                  * tear down our book-keeping here. */
767                 ll_removepage(page);
768         }
769         up(&lli->lli_size_sem);
770         RETURN(rc);
771 }
772
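/* Reserve up to @len pages from the per-superblock read-ahead budget;
 * returns how many pages were actually granted.  ll_ra_count_put() gives
 * them back. */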
773 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
774 {
775         struct ll_ra_info *ra = &sbi->ll_ra_info;
776         unsigned long ret;
777         ENTRY;
778
779         spin_lock(&sbi->ll_lock);
780         ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
781         ra->ra_cur_pages += ret;
782         spin_unlock(&sbi->ll_lock);
783
784         RETURN(ret);
785 }
786
787 static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
788 {
789         struct ll_ra_info *ra = &sbi->ll_ra_info;
790         spin_lock(&sbi->ll_lock);
791         LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
792                  ra->ra_cur_pages, len);
793         ra->ra_cur_pages -= len;
794         spin_unlock(&sbi->ll_lock);
795 }
796
797 /* called for each page in a completed rpc.*/
798 void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
799 {
800         struct ll_async_page *llap;
801         struct page *page;
802         ENTRY;
803
804         llap = llap_from_cookie(data);
805         if (IS_ERR(llap)) {
806                 EXIT;
807                 return;
808         }
809
810         page = llap->llap_page;
811         LASSERT(PageLocked(page));
812
813         LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
814
815         if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
816                 ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
817
818         if (rc == 0)  {
819                 if (cmd == OBD_BRW_READ) {
820                         if (!llap->llap_defer_uptodate)
821                                 SetPageUptodate(page);
822                 } else {
823                         llap->llap_write_queued = 0;
824                 }
825                 ClearPageError(page);
826         } else {
827                 if (cmd == OBD_BRW_READ) {
828                         llap->llap_defer_uptodate = 0;
829                 } else {
830                         ll_redirty_page(page);
831                 }
832                 SetPageError(page);
833         }
834
835         unlock_page(page);
836
837         if (0 && cmd == OBD_BRW_WRITE) {
838                 llap_write_complete(page->mapping->host, llap);
839                 ll_try_done_writing(page->mapping->host);
840         }
841
842         if (PageWriteback(page)) {
843                 end_page_writeback(page);
844         }
845         page_cache_release(page);
846         EXIT;
847 }
848
849 /* the kernel calls us here when a page is unhashed from the page cache.
850  * the page will be locked and the kernel is holding a spinlock, so
851  * we need to be careful.  we're just tearing down our book-keeping
852  * here. */
853 void ll_removepage(struct page *page)
854 {
855         struct inode *inode = page->mapping->host;
856         struct obd_export *exp;
857         struct ll_async_page *llap;
858         struct ll_sb_info *sbi = ll_i2sbi(inode);
859         int rc;
860         ENTRY;
861
862         LASSERT(!in_interrupt());
863
864         /* sync pages or failed read pages can leave pages in the page
865          * cache that don't have our data associated with them anymore */
866         if (page->private == 0) {
867                 EXIT;
868                 return;
869         }
870
871         LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
872
873         exp = ll_i2obdexp(inode);
874         if (exp == NULL) {
875                 CERROR("page %p ind %lu gave null export\n", page, page->index);
876                 EXIT;
877                 return;
878         }
879
880         llap = llap_from_page(page, 0);
881         if (IS_ERR(llap)) {
882                 CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
883                        page->index, PTR_ERR(llap));
884                 EXIT;
885                 return;
886         }
887
888         //llap_write_complete(inode, llap);
889         rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
890                                      llap->llap_cookie);
891         if (rc != 0)
892                 CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
893
894         /* this unconditional free is only safe because the page lock
895          * is providing exclusivity to memory pressure/truncate/writeback..*/
896         __clear_page_ll_data(page);
897
898         spin_lock(&sbi->ll_lock);
899         if (!list_empty(&llap->llap_pglist_item))
900                 list_del_init(&llap->llap_pglist_item);
901         sbi->ll_pglist_gen++;
902         sbi->ll_async_page_count--;
903         spin_unlock(&sbi->ll_lock);
904         OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
905         EXIT;
906 }
907
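/* Check, without taking a reference, whether a PR or PW extent lock
 * already covers this page.  Read-ahead apparently insists on locks that
 * are already granted and not being called back, while a regular read
 * will match more leniently. */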
908 static int ll_page_matches(struct page *page, int readahead)
909 {
910         struct lustre_handle match_lockh = {0};
911         struct inode *inode = page->mapping->host;
912         ldlm_policy_data_t page_extent;
913         int flags, matches;
914         ENTRY;
915
916         page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
917         page_extent.l_extent.end =
918                 page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
919         flags = LDLM_FL_TEST_LOCK;
920         if (!readahead)
921                 flags |= LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED;
922         matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
923                             ll_i2info(inode)->lli_smd, LDLM_EXTENT,
924                             &page_extent, LCK_PR | LCK_PW, &flags, inode,
925                             &match_lockh);
926         RETURN(matches);
927 }
928
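/* Queue a single page read into the group io.  A page reference is taken
 * here and released by ll_ap_completion(); @defer marks read-ahead pages
 * whose uptodate handling is deferred until someone actually reads them. */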
929 static int ll_issue_page_read(struct obd_export *exp,
930                               struct ll_async_page *llap,
931                               struct obd_io_group *oig, int defer)
932 {
933         struct page *page = llap->llap_page;
934         int rc;
935
936         page_cache_get(page);
937         llap->llap_defer_uptodate = defer;
938         llap->llap_ra_used = 0;
939         rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
940                                 NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
941                                 PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY
942                                               | ASYNC_URGENT);
943         if (rc) {
944                 LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
945                 page_cache_release(page);
946         }
947         RETURN(rc);
948 }
949
950 static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
951 {
952         LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
953         ra->ra_stats[which]++;
954 }
955
956 static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
957 {
958         struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
959         struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
960
961         spin_lock(&sbi->ll_lock);
962         ll_ra_stats_inc_unlocked(ra, which);
963         spin_unlock(&sbi->ll_lock);
964 }
965
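/* A read-ahead page is being dropped without ever having been used by a
 * reader; account it as discarded. */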
966 void ll_ra_accounting(struct page *page, struct address_space *mapping)
967 {
968         struct ll_async_page *llap;
969
970         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
971         if (IS_ERR(llap))
972                 return;
973
974         if (!llap->llap_defer_uptodate || llap->llap_ra_used)
975                 return;
976
977         ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
978 }
979
980 #define RAS_CDEBUG(ras) \
981         CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
982                ras->ras_last_readpage, ras->ras_consecutive,            \
983                ras->ras_window_start, ras->ras_window_len,              \
984                ras->ras_next_readahead);
985
986 static int index_in_window(unsigned long index, unsigned long point,
987                            unsigned long before, unsigned long after)
988 {
989         unsigned long start = point - before, end = point + after;
990
991         if (start > point)
992                start = 0;
993         if (end < point)
994                end = ~0;
995
996         return start <= index && index <= end;
997 }
998
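/* Issue read-ahead for the part of the window that ras reserved for us,
 * stopping at kms, at the first page not covered by a dlm lock, or when
 * our page budget runs out.  Pages that can't be grabbed or are already
 * uptodate are simply skipped.  Returns the number of pages queued for
 * read. */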
999 static int ll_readahead(struct ll_readahead_state *ras,
1000                          struct obd_export *exp, struct address_space *mapping,
1001                          struct obd_io_group *oig, int flags)
1002 {
1003         unsigned long i, start = 0, end = 0, reserved;
1004         struct ll_async_page *llap;
1005         struct page *page;
1006         int rc, ret = 0, match_failed = 0;
1007         __u64 kms;
1008         unsigned int gfp_mask;
1009         ENTRY;
1010
1011         kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
1012         if (kms == 0) {
1013                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
1014                 RETURN(0);
1015         }
1016
1017         spin_lock(&ras->ras_lock);
1018         /* reserve a part of the read-ahead window that we'll be issuing */
1019         if (ras->ras_window_len) {
1020                 start = ras->ras_next_readahead;
1021                 end = ras->ras_window_start + ras->ras_window_len - 1;
1022                 end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
1023                 ras->ras_next_readahead = max(end, end + 1);
1024
1025                 RAS_CDEBUG(ras);
1026         }
1027         spin_unlock(&ras->ras_lock);
1028
1029         if (end == 0) {
1030                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
1031                 RETURN(0);
1032         }
1033
1034         reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);
1035         if (reserved < end - start + 1)
1036                 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
1037
1038         gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
1039 #ifdef __GFP_NOWARN
1040         gfp_mask |= __GFP_NOWARN;
1041 #endif
1042
1043         for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
1044                 /* skip locked pages from previous readpage calls */
1045                 page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
1046                 if (page == NULL) {
1047                         CDEBUG(D_READA, "g_c_p_n failed\n");
1048                         continue;
1049                 }
1050
1051                 /* we do this first so that we can see the page in the /proc
1052                  * accounting */
1053                 llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
1054                 if (IS_ERR(llap) || llap->llap_defer_uptodate)
1055                         goto next_page;
1056
1057                 /* skip completed pages */
1058                 if (Page_Uptodate(page))
1059                         goto next_page;
1060
1061                 /* bail when we hit the end of the lock. */
1062                 if ((rc = ll_page_matches(page, 1)) <= 0) {
1063                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
1064                                        "lock match failed: rc %d\n", rc);
1065                         ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
1066                         match_failed = 1;
1067                         goto next_page;
1068                 }
1069
1070                 rc = ll_issue_page_read(exp, llap, oig, 1);
1071                 if (rc == 0) {
1072                         reserved--;
1073                         ret++;
1074                         LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
1075                                        "started read-ahead\n");
1076                 }
1077                 if (rc) {
1078         next_page:
1079                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
1080                                        "skipping read-ahead\n");
1081
1082                         unlock_page(page);
1083                 }
1084                 page_cache_release(page);
1085         }
1086
1087         LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
1088         if (reserved != 0)
1089                 ll_ra_count_put(ll_i2sbi(mapping->host), reserved);
1090         if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
1091                 ll_ra_stats_inc(mapping, RA_STAT_EOF);
1092
1093         /* if we didn't get to the end of the region we reserved from
1094          * the ras we need to go back and update the ras so that the
1095          * next read-ahead tries from where we left off.  we only do so
1096          * if the region we failed to issue read-ahead on is still ahead
1097          * of the app and behind the next index to start read-ahead from */
1098         if (i != end + 1) {
1099                 spin_lock(&ras->ras_lock);
1100                 if (i < ras->ras_next_readahead &&
1101                     index_in_window(i, ras->ras_window_start, 0,
1102                                     ras->ras_window_len)) {
1103                         ras->ras_next_readahead = i;
1104                         RAS_CDEBUG(ras);
1105                 }
1106                 spin_unlock(&ras->ras_lock);
1107         }
1108
1109         RETURN(ret);
1110 }
1111
1112 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
1113 {
1114         ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
1115 }
1116
1117 /* called with the ras_lock held or from places where it doesn't matter */
1118 static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
1119 {
1120         ras->ras_last_readpage = index;
1121         ras->ras_consecutive = 1;
1122         ras->ras_window_len = 0;
1123         ras_set_start(ras, index);
1124         ras->ras_next_readahead = ras->ras_window_start;
1125
1126         RAS_CDEBUG(ras);
1127 }
1128
1129 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
1130 {
1131         spin_lock_init(&ras->ras_lock);
1132         ras_reset(ras, 0);
1133 }
1134
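/* Update the per-file read-ahead state on every readpage call: reset the
 * window when the app seeks away or when we miss inside our own window,
 * start a window once a few consecutive pages have been read, and grow it
 * periodically while the reads stay sequential. */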
1135 static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
1136                        unsigned long index, unsigned hit)
1137 {
1138         struct ll_ra_info *ra = &sbi->ll_ra_info;
1139         int zero = 0;
1140         ENTRY;
1141
1142         spin_lock(&sbi->ll_lock);
1143         spin_lock(&ras->ras_lock);
1144
1145         ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
1146
1147         /* reset the read-ahead window in two cases.  First, when the app seeks
1148          * or reads to some other part of the file.  Second, if we miss on a
1149          * page that we think we previously issued read-ahead for.  This can
1150          * be a symptom of there being so many read-ahead pages that the VM
1151          * reclaims them before we get to them. */
1152         if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
1153                 zero = 1;
1154                 ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
1155         } else if (!hit && ras->ras_window_len &&
1156                    index < ras->ras_next_readahead &&
1157                    index_in_window(index, ras->ras_window_start, 0,
1158                                    ras->ras_window_len)) {
1159                 zero = 1;
1160                 ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
1161         }
1162
1163         if (zero) {
1164                 ras_reset(ras, index);
1165                 GOTO(out_unlock, 0);
1166         }
1167
1168         ras->ras_last_readpage = index;
1169         ras->ras_consecutive++;
1170         ras_set_start(ras, index);
1171         ras->ras_next_readahead = max(ras->ras_window_start,
1172                                       ras->ras_next_readahead);
1173
1174         /* wait for a few pages to arrive before issuing readahead to avoid
1175          * the worst overutilization */
1176         if (ras->ras_consecutive == 3) {
1177                 ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
1178                 GOTO(out_unlock, 0);
1179         }
1180
1181         /* we need to increase the window sometimes.  we'll arbitrarily
1182          * do it half-way through the pages in an rpc */
1183         if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
1184             (PTLRPC_MAX_BRW_PAGES >> 1)) {
1185                 ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
1186                 ras->ras_window_len = min(ras->ras_window_len,
1187                                           ra->ra_max_pages);
1188         }
1189
1190         EXIT;
1191 out_unlock:
1192         RAS_CDEBUG(ras);
1193         spin_unlock(&ras->ras_lock);
1194         spin_unlock(&sbi->ll_lock);
1195         return;
1196 }
1197
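/* Write out a single dirty page for the VM.  If the page is already queued
 * for async io we just flag it ready and urgent; otherwise we queue it (or
 * fall back to a sync write).  On failure the page is redirtied so the
 * write will be retried, and the error is stashed in lli_async_rc to be
 * reported later. */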
1198 int ll_writepage(struct page *page)
1199 {
1200         struct inode *inode = page->mapping->host;
1201         struct ll_inode_info *lli = ll_i2info(inode);
1202         struct obd_export *exp;
1203         struct ll_async_page *llap;
1204         int rc = 0;
1205         ENTRY;
1206
1207         LASSERT(!PageDirty(page));
1208         LASSERT(PageLocked(page));
1209
1210         exp = ll_i2obdexp(inode);
1211         if (exp == NULL)
1212                 GOTO(out, rc = -EINVAL);
1213
1214         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
1215         if (IS_ERR(llap))
1216                 GOTO(out, rc = PTR_ERR(llap));
1217
1218         page_cache_get(page);
1219         if (llap->llap_write_queued) {
1220                 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
1221                 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
1222                                          llap->llap_cookie,
1223                                          ASYNC_READY | ASYNC_URGENT);
1224         } else {
1225                 rc = queue_or_sync_write(exp, inode, llap,
1226                                          PAGE_SIZE, ASYNC_READY | ASYNC_URGENT);
1227         }
1228         if (rc)
1229                 page_cache_release(page);
1230 out:
1231         if (rc) {
1232                 if (!lli->lli_async_rc)
1233                         lli->lli_async_rc = rc;
1234                 /* re-dirty page on error so it retries write */
1235                 ll_redirty_page(page);
1236                 unlock_page(page);
1237         }
1238         RETURN(rc);
1239 }
1240
1241 /*
1242  * for now we do our readpage the same on both 2.4 and 2.5.  The kernel's
1243  * read-ahead assumes it is valid to issue readpage all the way up to
1244  * i_size, but our dlm locks make that not the case.  We disable the
1245  * kernel's read-ahead and do our own by walking ahead in the page cache
1246  * checking for dlm lock coverage.  the main difference between 2.4 and
1247  * 2.6 is how read-ahead gets batched and issued, but we're using our own,
1248  * so they look the same.
1249  */
1250 int ll_readpage(struct file *filp, struct page *page)
1251 {
1252         struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
1253         struct inode *inode = page->mapping->host;
1254         struct obd_export *exp;
1255         struct ll_async_page *llap;
1256         struct obd_io_group *oig = NULL;
1257         int rc;
1258         ENTRY;
1259
1260         LASSERT(PageLocked(page));
1261         LASSERT(!PageUptodate(page));
1262         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
1263                inode->i_ino, inode->i_generation, inode,
1264                (((loff_t)page->index) << PAGE_SHIFT),
1265                (((loff_t)page->index) << PAGE_SHIFT));
1266         LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
1267
1268         rc = oig_init(&oig);
1269         if (rc < 0)
1270                 GOTO(out, rc);
1271
1272         exp = ll_i2obdexp(inode);
1273         if (exp == NULL)
1274                 GOTO(out, rc = -EINVAL);
1275
1276         llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
1277         if (IS_ERR(llap))
1278                 GOTO(out, rc = PTR_ERR(llap));
1279
1280         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1281                 ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
1282                            llap->llap_defer_uptodate);
1283
1284         if (llap->llap_defer_uptodate) {
1285                 llap->llap_ra_used = 1;
1286                 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1287                                   fd->fd_flags);
1288                 if (rc > 0)
1289                         obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
1290                                              NULL, oig);
1291                 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
1292                 SetPageUptodate(page);
1293                 unlock_page(page);
1294                 GOTO(out_oig, rc = 0);
1295         }
1296
1297         rc = ll_page_matches(page, 0);
1298         if (rc < 0) {
1299                 LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
1300                 GOTO(out, rc);
1301         }
1302
1303         if (rc == 0) {
1304                 CWARN("ino %lu page %lu (%llu) not covered by "
1305                       "a lock (mmap?).  check debug logs.\n",
1306                       inode->i_ino, page->index,
1307                       (long long)page->index << PAGE_CACHE_SHIFT);
1308         }
1309
1310         rc = ll_issue_page_read(exp, llap, oig, 0);
1311         if (rc)
1312                 GOTO(out, rc);
1313
1314         LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
1315         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1316                 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1317                              fd->fd_flags);
1318
1319         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1320
1321 out:
1322         if (rc)
1323                 unlock_page(page);
1324 out_oig:
1325         if (oig != NULL)
1326                 oig_release(oig);
1327         RETURN(rc);
1328 }