1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Lite I/O page cache routines shared by different kernel revs
5  *
6  *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #include <linux/config.h>
25 #include <linux/kernel.h>
26 #include <linux/mm.h>
27 #include <linux/string.h>
28 #include <linux/stat.h>
29 #include <linux/errno.h>
30 #include <linux/smp_lock.h>
31 #include <linux/unistd.h>
32 #include <linux/version.h>
33 #include <asm/system.h>
34 #include <asm/uaccess.h>
35
36 #include <linux/fs.h>
37 #include <linux/stat.h>
38 #include <asm/uaccess.h>
39 #include <asm/segment.h>
40 #include <linux/mm.h>
41 #include <linux/pagemap.h>
42 #include <linux/smp_lock.h>
43
44 #define DEBUG_SUBSYSTEM S_LLITE
45
46 #include <linux/lustre_mdc.h>
47 #include <linux/lustre_lite.h>
48 #include "llite_internal.h"
49 #include <linux/lustre_compat25.h>
50
51 #ifndef list_for_each_prev_safe
52 #define list_for_each_prev_safe(pos, n, head) \
53         for (pos = (head)->prev, n = pos->prev; pos != (head); \
54                 pos = n, n = pos->prev )
55 #endif
56
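/* slab for struct ll_async_page allocations.  The object size is kept in
 * ll_async_page_slab_size because each allocation also holds the async IO
 * cookie laid out right after the llap itself (see llap_from_page()). */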
57 kmem_cache_t *ll_async_page_slab = NULL;
58 size_t ll_async_page_slab_size = 0;
59
60 /* SYNCHRONOUS I/O to object storage for an inode */
61 static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
62                   struct page *page, int flags)
63 {
64         struct ll_inode_info *lli = ll_i2info(inode);
65         struct lov_stripe_md *lsm = lli->lli_smd;
66         struct brw_page pg;
67         int rc;
68         ENTRY;
69
70         pg.pg = page;
71         pg.off = ((obd_off)page->index) << PAGE_SHIFT;
72
73         if ((cmd & OBD_BRW_WRITE) && (pg.off + PAGE_SIZE > inode->i_size))
74                 pg.count = inode->i_size % PAGE_SIZE;
75         else
76                 pg.count = PAGE_SIZE;
77
78         LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
79                        cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
80                        inode->i_ino, pg.off, pg.off);
81         if (pg.count == 0) {
82                 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
83                        LPU64"\n",
84                        inode->i_ino, inode, inode->i_size, page->mapping->host,
85                        page->mapping->host->i_size, page->index, pg.off);
86         }
87
88         pg.flag = flags;
89
90         if (cmd & OBD_BRW_WRITE)
91                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
92                                     LPROC_LL_BRW_WRITE, pg.count);
93         else
94                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
95                                     LPROC_LL_BRW_READ, pg.count);
96         rc = obd_brw(cmd, ll_i2dtexp(inode), oa, lsm, 1, &pg, NULL);
97         if (rc == 0)
98                 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
99         else if (rc != -EIO)
100                 CERROR("error from obd_brw: rc = %d\n", rc);
101         RETURN(rc);
102 }
103
104 /* this isn't where truncate starts.   roughly:
105  * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
106  * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
107  * avoid races.
108  *
109  * must be called under ->lli_size_sem */
110 void ll_truncate(struct inode *inode)
111 {
112         struct ll_inode_info *lli = ll_i2info(inode);
113         struct lov_stripe_md *lsm = lli->lli_smd;
114         struct ost_lvb lvb;
115         struct obdo oa;
116         int rc;
117         ENTRY;
118         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
119                inode->i_generation, inode, inode->i_size, inode->i_size);
120
121         if (lli->lli_size_sem_owner != current) {
122                 EXIT;
123                 return;
124         }
125
126         if (!lsm) {
127                 CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
128                        inode->i_ino);
129                 GOTO(out_unlock, 0);
130         }
131
132         LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
133
134         /* XXX I'm pretty sure this is a hack to paper over a more fundamental
135          * race condition. */
136         lov_stripe_lock(lsm);
137         inode_init_lvb(inode, &lvb);
138         obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 0);
139         if (lvb.lvb_size == inode->i_size) {
140                 CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
141                        lsm->lsm_object_id, inode->i_size, inode->i_size);
142                 lov_stripe_unlock(lsm);
143                 GOTO(out_unlock, 0);
144         }
145
146         obd_adjust_kms(ll_i2dtexp(inode), lsm, inode->i_size, 1);
147         lov_stripe_unlock(lsm);
148
149         if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
150                      (inode->i_size & ~PAGE_MASK))) {
151                 /* If the truncate leaves behind a partial page, update its
152                  * checksum. */
153                 struct page *page = find_get_page(inode->i_mapping,
154                                                   inode->i_size >> PAGE_CACHE_SHIFT);
155                 if (page != NULL) {
156                         struct ll_async_page *llap = llap_cast_private(page);
157                         if (llap != NULL) {
158                                 llap->llap_checksum =
159                                         crc32_le(0, kmap(page), PAGE_SIZE);
160                                 kunmap(page);
161                         }
162                         page_cache_release(page);
163                 }
164         }
165
166         CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
167                lsm->lsm_object_id, inode->i_size, inode->i_size);
168
169         oa.o_id = lsm->lsm_object_id;
170         oa.o_valid = OBD_MD_FLID;
171
172         obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
173                         OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
174                         OBD_MD_FLFID | OBD_MD_FLGENER);
175
176         ll_inode_size_unlock(inode, 0);
177
178         rc = obd_punch(ll_i2dtexp(inode), &oa, lsm, inode->i_size,
179                        OBD_OBJECT_EOF, NULL);
180         if (rc)
181                 CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
182         else
183                 obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
184                               OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
185         EXIT;
186         return;
187
188  out_unlock:
189         ll_inode_size_unlock(inode, 0);
190 } /* ll_truncate */
191
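/* called before a buffered write copies data into @page: first ask the OSC
 * whether the IO would fail outright (OBD_BRW_CHECK), then read in the
 * existing page contents unless the write covers the whole page or starts
 * at or past the known size (kms), in which case the page is zero-filled. */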
192 int ll_prepare_write(struct file *file, struct page *page, unsigned from,
193                      unsigned to)
194 {
195         struct inode *inode = page->mapping->host;
196         struct ll_inode_info *lli = ll_i2info(inode);
197         struct lov_stripe_md *lsm = lli->lli_smd;
198         obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
199         struct brw_page pga;
200         struct obdo oa;
201         struct ost_lvb lvb;
202         int rc = 0;
203         ENTRY;
204
205         LASSERT(PageLocked(page));
206         (void)llap_cast_private(page); /* assertion */
207
208         /* Check to see if we should return -EIO right away */
209         pga.pg = page;
210         pga.off = offset;
211         pga.count = PAGE_SIZE;
212         pga.flag = 0;
213
214         oa.o_mode = inode->i_mode;
215         oa.o_id = lsm->lsm_object_id;
216         oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
217         obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);
218
219         rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oa, lsm,
220                      1, &pga, NULL);
221         if (rc)
222                 RETURN(rc);
223
224         if (PageUptodate(page)) {
225                 LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
226                 RETURN(0);
227         }
228
229         /* We're completely overwriting an existing page, so _don't_ set it up
230          * to date until commit_write */
231         if (from == 0 && to == PAGE_SIZE) {
232                 LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
233                 POISON_PAGE(page, 0x11);
234                 RETURN(0);
235         }
236
237         /* If we are writing to a new page, no need to read old data.  The extent
238          * locking will have updated the KMS, and for our purposes here we can
239          * treat it like i_size. */
240         lov_stripe_lock(lsm);
241         inode_init_lvb(inode, &lvb);
242         obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 0);
243         lov_stripe_unlock(lsm);
244         if (lvb.lvb_size <= offset) {
245                 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
246                                lvb.lvb_size, offset);
247                 memset(kmap(page), 0, PAGE_SIZE);
248                 kunmap(page);
249                 GOTO(prepare_done, rc = 0);
250         }
251
252         /* XXX could be an async ocp read.. read-ahead? */
253         rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
254         if (rc == 0) {
255                 /* bug 1598: don't clobber blksize */
256                 oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
257                 obdo_refresh_inode(inode, &oa, oa.o_valid);
258         }
259
260         EXIT;
261  prepare_done:
262         if (rc == 0)
263                 SetPageUptodate(page);
264
265         return rc;
266 }
267
268 static int ll_ap_make_ready(void *data, int cmd)
269 {
270         struct ll_async_page *llap;
271         struct page *page;
272         ENTRY;
273
274         llap = LLAP_FROM_COOKIE(data);
275         page = llap->llap_page;
276
277         LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n", cmd, page,
278                  page->mapping->host->i_ino, page->index);
279
280         /* we're trying to write, but the page is locked.. come back later */
281         if (TryLockPage(page))
282                 RETURN(-EAGAIN);
283
284         LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
285         page_cache_get(page);
286
287         /* if we left PageDirty we might get another writepage call
288          * in the future.  list walkers are bright enough
289          * to check page dirty so we can leave it on whatever list
290          * it's on.  XXX also, we're called with the cli list so if
291          * we got the page cache list we'd create a lock inversion
292          * with the removepage path which gets the page lock then the
293          * cli lock */
294         clear_page_dirty(page);
295         RETURN(0);
296 }
297
298 /* We have two reasons for giving llite the opportunity to change the
299  * write length of a given queued page as it builds the RPC containing
300  * the page:
301  *
302  * 1) Further extending writes may have landed in the page cache
303  *    since a partial write first queued this page requiring us
304  *    to write more from the page cache.  (No further races are possible, since
305  *    by the time this is called, the page is locked.)
306  * 2) We might have raced with truncate and want to avoid performing
307  *    write RPCs that are just going to be thrown away by the
308  *    truncate's punch on the storage targets.
309  *
310  * The kms serves these purposes as it is set at both truncate and extending
311  * writes.
312  */
313 static int ll_ap_refresh_count(void *data, int cmd)
314 {
315         struct ll_inode_info *lli;
316         struct ll_async_page *llap;
317         struct lov_stripe_md *lsm;
318         struct page *page;
319         struct inode *inode;
320         struct ost_lvb lvb;
321         __u64 kms;
322         ENTRY;
323
324         /* readpage queues with _COUNT_STABLE, shouldn't get here. */
325         LASSERT(cmd != OBD_BRW_READ);
326
327         llap = LLAP_FROM_COOKIE(data);
328         page = llap->llap_page;
329         inode = page->mapping->host;
330         lli = ll_i2info(inode);
331         lsm = lli->lli_smd;
332
333         lov_stripe_lock(lsm);
334         inode_init_lvb(inode, &lvb);
335         obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
336         kms = lvb.lvb_size;
337         lov_stripe_unlock(lsm);
338
339         /* catch race with truncate */
340         if (((__u64)page->index << PAGE_SHIFT) >= kms)
341                 return 0;
342
343         /* catch sub-page write at end of file */
344         if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
345                 return kms % PAGE_SIZE;
346
347         return PAGE_SIZE;
348 }
349
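/* fill @oa with the inode attributes that accompany a bulk read or write;
 * writes additionally carry the IO epoch, uid/gid, timestamps and the
 * fid/generation of the inode. */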
350 void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
351 {
352         struct lov_stripe_md *lsm;
353         obd_flag valid_flags;
354
355         lsm = ll_i2info(inode)->lli_smd;
356
357         oa->o_id = lsm->lsm_object_id;
358         oa->o_valid = OBD_MD_FLID;
359         valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
360         if (cmd & OBD_BRW_WRITE) {
361                 oa->o_valid |= OBD_MD_FLEPOCH;
362                 oa->o_easize = ll_i2info(inode)->lli_io_epoch;
363                 oa->o_uid = inode->i_uid;
364                 oa->o_gid = inode->i_gid;
365
366                 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
367                         OBD_MD_FLUID | OBD_MD_FLGID |
368                         OBD_MD_FLFID | OBD_MD_FLGENER;
369         }
370
371         obdo_from_inode(oa, inode, valid_flags);
372 }
373
374 static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
375 {
376         struct ll_async_page *llap;
377         ENTRY;
378
379         llap = LLAP_FROM_COOKIE(data);
380         ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
381
382         EXIT;
383 }
384
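/* async page callbacks handed to the OSC through obd_prep_async_page(); the
 * OSC invokes them while assembling write RPCs (make_ready, refresh_count,
 * fill_obdo) and when an RPC covering the page completes (ap_completion). */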
385 static struct obd_async_page_ops ll_async_page_ops = {
386         .ap_make_ready =        ll_ap_make_ready,
387         .ap_refresh_count =     ll_ap_refresh_count,
388         .ap_fill_obdo =         ll_ap_fill_obdo,
389         .ap_completion =        ll_ap_completion,
390 };
391
392 struct ll_async_page *llap_cast_private(struct page *page)
393 {
394         struct ll_async_page *llap = (struct ll_async_page *)page_private(page);
395
396         LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
397                  "page %p private %lu gave magic %d which != %d\n",
398                  page, page_private(page), llap->llap_magic, LLAP_MAGIC);
399
400         return llap;
401 }
402
403 /* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
404  *
405  * There is an llap attached onto every page in lustre, linked off @sbi.
406  * We add a dummy llap to the list so we don't lose our place while walking.
407  * If llaps in the list are being moved they will only move to the end
408  * of the LRU, and we aren't terribly interested in those pages here (we
409  * start at the beginning of the list where the least-used llaps are).
410  */
411 int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
412 {
413         struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
414         unsigned long total, want, count = 0;
415
416         total = sbi->ll_async_page_count;
417
418         /* There can be a large number of llaps (600k or more in a large
419          * memory machine) so the VM 1/6 shrink ratio is likely too much.
420          * Since we are freeing pages also, we don't necessarily want to
421          * shrink so much.  Limit to 40MB of pages + llaps per call. */
422         if (shrink_fraction == 0)
423                 want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
424         else
425                 want = (total + shrink_fraction - 1) / shrink_fraction;
426
427         if (want > 40 << (20 - PAGE_CACHE_SHIFT))
428                 want = 40 << (20 - PAGE_CACHE_SHIFT);
429
430         CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
431                want, total, shrink_fraction);
432
433         spin_lock(&sbi->ll_lock);
434         list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
435
436         while (total-- > 0 && count < want) {
437                 struct page *page;
438                 int keep;
439
440                 if (unlikely(need_resched())) {
441                         spin_unlock(&sbi->ll_lock);
442                         cond_resched();
443                         spin_lock(&sbi->ll_lock);
444                 }
445
446                 llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
447                 list_del_init(&dummy_llap.llap_pglist_item);
448                 if (llap == NULL)
449                         break;
450
451                 page = llap->llap_page;
452                 LASSERT(page != NULL);
453
454                 list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
455
456                 /* Page needs/undergoing IO */
457                 if (TryLockPage(page)) {
458                         LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
459                         continue;
460                 }
461
462                 if (llap->llap_write_queued || PageDirty(page) ||
463                     (!PageUptodate(page) &&
464                      llap->llap_origin != LLAP_ORIGIN_READAHEAD))
465                         keep = 1;
466                 else
467                         keep = 0;
468
469                 LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s origin %s\n",
470                                keep ? "keep" : "drop",
471                                llap->llap_write_queued ? "wq " : "",
472                                PageDirty(page) ? "pd " : "",
473                                PageUptodate(page) ? "" : "!pu ",
474                                llap->llap_defer_uptodate ? "" : "!du",
475                                llap_origins[llap->llap_origin]);
476
477                 /* If page is dirty or undergoing IO don't discard it */
478                 if (keep) {
479                         unlock_page(page);
480                         continue;
481                 }
482
483                 page_cache_get(page);
484                 spin_unlock(&sbi->ll_lock);
485
486                 if (page->mapping != NULL) {
487                         ll_teardown_mmaps(page->mapping,
488                                          (__u64)page->index<<PAGE_CACHE_SHIFT,
489                                          ((__u64)page->index<<PAGE_CACHE_SHIFT)|
490                                           ~PAGE_CACHE_MASK);
491                         if (!PageDirty(page) && !page_mapped(page)) {
492                                 ll_ra_accounting(llap, page->mapping);
493                                 ll_truncate_complete_page(page);
494                                 ++count;
495                         } else {
496                                 LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
497                                                              " because it is "
498                                                              "%s\n",
499                                                               PageDirty(page)?
500                                                               "dirty":"mapped");
501                         }
502                 }
503                 unlock_page(page);
504                 page_cache_release(page);
505
506                 spin_lock(&sbi->ll_lock);
507         }
508         list_del(&dummy_llap.llap_pglist_item);
509         spin_unlock(&sbi->ll_lock);
510
511         CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
512                count, want, total);
513
514         return count;
515 }
516
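/* find or create the ll_async_page for @page.  An existing llap is moved to
 * the tail of the per-superblock LRU (unless the page is about to be
 * removed); otherwise a new one is allocated from the slab, registered with
 * the OSC via obd_prep_async_page() and stored in page->private.  With
 * LL_SBI_CHECKSUM the page's crc32 is also tracked to catch corruption
 * between dirtying and writeout. */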
517 static struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
518 {
519         struct ll_async_page *llap;
520         struct obd_export *exp;
521         struct inode *inode = page->mapping->host;
522         struct ll_sb_info *sbi;
523         int rc;
524         ENTRY;
525
526         if (!inode) {
527                 static int triggered;
528
529                 if (!triggered) {
530                         LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
531                                        "page received\n");
532                         libcfs_debug_dumpstack(NULL);
533                         triggered = 1;
534                 }
535                 RETURN(ERR_PTR(-EINVAL));
536         }
537         sbi = ll_i2sbi(inode);
538         LASSERT(ll_async_page_slab);
539         LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
540
541         llap = llap_cast_private(page);
542         if (llap != NULL) {
543                 /* move to end of LRU list, except when page is just about to
544                  * die */
545                 if (origin != LLAP_ORIGIN_REMOVEPAGE) {
546                         spin_lock(&sbi->ll_lock);
547                         sbi->ll_pglist_gen++;
548                         list_del_init(&llap->llap_pglist_item);
549                         list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
550                         spin_unlock(&sbi->ll_lock);
551                 }
552                 GOTO(out, llap);
553         }
554
555         exp = ll_i2dtexp(page->mapping->host);
556         if (exp == NULL)
557                 RETURN(ERR_PTR(-EINVAL));
558
559         /* limit the number of lustre-cached pages */
560         if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
561                 llap_shrink_cache(sbi, 0);
562
563         OBD_SLAB_ALLOC(llap, ll_async_page_slab, SLAB_KERNEL,
564                        ll_async_page_slab_size);
565         if (llap == NULL)
566                 RETURN(ERR_PTR(-ENOMEM));
567         llap->llap_magic = LLAP_MAGIC;
568         llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
569
570         rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
571                                  (obd_off)page->index << PAGE_SHIFT,
572                                  &ll_async_page_ops, llap, &llap->llap_cookie);
573         if (rc) {
574                 OBD_SLAB_FREE(llap, ll_async_page_slab,
575                               ll_async_page_slab_size);
576                 RETURN(ERR_PTR(rc));
577         }
578
579         CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
580                page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
581         /* also zeroing the PRIVBITS low order bitflags */
582         __set_page_ll_data(page, llap);
583         llap->llap_page = page;
584
585         spin_lock(&sbi->ll_lock);
586         sbi->ll_pglist_gen++;
587         sbi->ll_async_page_count++;
588         list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
589         spin_unlock(&sbi->ll_lock);
590
591  out:
592         if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
593                 __u32 csum = 0;
594                 csum = crc32_le(csum, kmap(page), PAGE_SIZE);
595                 kunmap(page);
596                 if (origin == LLAP_ORIGIN_READAHEAD ||
597                     origin == LLAP_ORIGIN_READPAGE) {
598                         llap->llap_checksum = 0;
599                 } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
600                            llap->llap_checksum == 0) {
601                         llap->llap_checksum = csum;
602                         CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
603                 } else if (llap->llap_checksum == csum) {
604                         /* origin == LLAP_ORIGIN_WRITEPAGE */
605                         CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
606                                page, csum);
607                 } else {
608                         /* origin == LLAP_ORIGIN_WRITEPAGE */
609                         LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
610                                        "%x!\n", llap->llap_checksum, csum);
611                 }
612         }
613
614         llap->llap_origin = origin;
615         RETURN(llap);
616 }
617
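/* queue @llap for asynchronous writeout; if the OSC cannot queue it, fall
 * back to a synchronous group IO, widening the write to a full page when we
 * are not at EOF (bug 4410) and re-verifying the page checksum before the
 * page leaves llite. */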
618 static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
619                                struct ll_async_page *llap,
620                                unsigned to, obd_flag async_flags)
621 {
622         unsigned long size_index = inode->i_size >> PAGE_SHIFT;
623         struct obd_io_group *oig;
624         struct ll_sb_info *sbi = ll_i2sbi(inode);
625         int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
626         ENTRY;
627
628         /* _make_ready only sees llap once we've unlocked the page */
629         llap->llap_write_queued = 1;
630         rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
631                                 llap->llap_cookie, OBD_BRW_WRITE | noquot,
632                                 0, 0, 0, async_flags);
633         if (rc == 0) {
634                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
635                 //llap_write_pending(inode, llap);
636                 GOTO(out, 0);
637         }
638
639         llap->llap_write_queued = 0;
640
641         rc = oig_init(&oig);
642         if (rc)
643                 GOTO(out, rc);
644
645         /* make full-page requests if we are not at EOF (bug 4410) */
646         if (to != PAGE_SIZE && llap->llap_page->index < size_index) {
647                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
648                                "sync write before EOF: size_index %lu, to %d\n",
649                                size_index, to);
650                 to = PAGE_SIZE;
651         } else if (to != PAGE_SIZE && llap->llap_page->index == size_index) {
652                 int size_to = inode->i_size & ~PAGE_MASK;
653                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
654                                "sync write at EOF: size_index %lu, to %d/%d\n",
655                                size_index, to, size_to);
656                 if (to < size_to)
657                         to = size_to;
658         }
659
660         /* compare the checksum once before the page leaves llite */
661         if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
662                      llap->llap_checksum != 0)) {
663                 __u32 csum = 0;
664                 struct page *page = llap->llap_page;
665                 csum = crc32_le(csum, kmap(page), PAGE_SIZE);
666                 kunmap(page);
667                 if (llap->llap_checksum == csum) {
668                         CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
669                                page, csum);
670                 } else {
671                         CERROR("page %p old cksum %x != new cksum %x!\n",
672                                page, llap->llap_checksum, csum);
673                 }
674         }
675
676         rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
677                                 llap->llap_cookie, OBD_BRW_WRITE | noquot,
678                                 0, to, 0, ASYNC_READY | ASYNC_URGENT |
679                                 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
680         if (rc)
681                 GOTO(free_oig, rc);
682
683         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
684         if (rc)
685                 GOTO(free_oig, rc);
686
687         rc = oig_wait(oig);
688
689         if (!rc && async_flags & ASYNC_READY)
690                 unlock_page(llap->llap_page);
691
692         LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
693
694 free_oig:
695         oig_release(oig);
696 out:
697         RETURN(rc);
698 }
699
700 /* update our write count to account for i_size increases that may have
701  * happened since we've queued the page for io. */
702
703 /* be careful not to return success without setting the page Uptodate or
704  * the next pass through prepare_write will read in stale data from disk. */
705 int ll_commit_write(struct file *file, struct page *page, unsigned from,
706                     unsigned to)
707 {
708         struct inode *inode = page->mapping->host;
709         struct ll_inode_info *lli = ll_i2info(inode);
710         struct lov_stripe_md *lsm = lli->lli_smd;
711         struct obd_export *exp;
712         struct ll_async_page *llap;
713         loff_t size;
714         int rc = 0;
715         ENTRY;
716
717         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
718         LASSERT(inode == file->f_dentry->d_inode);
719         LASSERT(PageLocked(page));
720
721         CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
722                inode, page, from, to, page->index);
723
724         llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
725         if (IS_ERR(llap))
726                 RETURN(PTR_ERR(llap));
727
728         exp = ll_i2dtexp(inode);
729         if (exp == NULL)
730                 RETURN(-EINVAL);
731
732         llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);
733
734         /* queue a write for some time in the future the first time we
735          * dirty the page */
736         if (!PageDirty(page)) {
737                 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
738                                      LPROC_LL_DIRTY_MISSES);
739
740                 rc = queue_or_sync_write(exp, inode, llap, to, 0);
741                 if (rc)
742                         GOTO(out, rc);
743         } else {
744                 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
745                                      LPROC_LL_DIRTY_HITS);
746         }
747
748         /* put the page in the page cache, from now on ll_removepage is
749          * responsible for cleaning up the llap.
750          * only set the page dirty when it has been queued to be written out */
751         if (llap->llap_write_queued)
752                 set_page_dirty(page);
753
754 out:
755         size = (((obd_off)page->index) << PAGE_SHIFT) + to;
756         ll_inode_size_lock(inode, 0);
757         if (rc == 0) {
758                 lov_stripe_lock(lsm);
759                 obd_adjust_kms(exp, lsm, size, 0);
760                 lov_stripe_unlock(lsm);
761                 if (size > inode->i_size)
762                         inode->i_size = size;
763                 SetPageUptodate(page);
764         } else if (size > inode->i_size) {
765                 /* this page is beyond i_size, so it can't be
766                  * truncated in ll_p_r_e during lock revoking.  we must
767                  * tear down our book-keeping here. */
768                 ll_removepage(page);
769         }
770         ll_inode_size_unlock(inode, 0);
771         RETURN(rc);
772 }
773
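/* reserve up to @len pages from the superblock-wide read-ahead budget and
 * return how many were actually granted; ll_ra_count_put() gives them back
 * once the read-ahead completes or is abandoned. */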
774 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
775 {
776         struct ll_ra_info *ra = &sbi->ll_ra_info;
777         unsigned long ret;
778         ENTRY;
779
780         spin_lock(&sbi->ll_lock);
781         ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
782         ra->ra_cur_pages += ret;
783         spin_unlock(&sbi->ll_lock);
784
785         RETURN(ret);
786 }
787
788 static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
789 {
790         struct ll_ra_info *ra = &sbi->ll_ra_info;
791         spin_lock(&sbi->ll_lock);
792         LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
793                  ra->ra_cur_pages, len);
794         ra->ra_cur_pages -= len;
795         spin_unlock(&sbi->ll_lock);
796 }
797
798 /* called for each page in a completed rpc.*/
799 void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
800 {
801         struct ll_async_page *llap;
802         struct page *page;
803         ENTRY;
804
805         llap = LLAP_FROM_COOKIE(data);
806         page = llap->llap_page;
807         LASSERT(PageLocked(page));
808
809         LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
810
811         if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
812                 ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
813
814         if (rc == 0)  {
815                 if (cmd & OBD_BRW_READ) {
816                         if (!llap->llap_defer_uptodate)
817                                 SetPageUptodate(page);
818                 } else {
819                         llap->llap_write_queued = 0;
820                 }
821                 ClearPageError(page);
822         } else {
823                 if (cmd & OBD_BRW_READ) {
824                         llap->llap_defer_uptodate = 0;
825                 } else {
826                         ll_redirty_page(page);
827                 }
828                 SetPageError(page);
829         }
830
831         unlock_page(page);
832
833         if (0 && cmd & OBD_BRW_WRITE) {
834                 llap_write_complete(page->mapping->host, llap);
835                 ll_try_done_writing(page->mapping->host);
836         }
837
838         if (PageWriteback(page)) {
839                 end_page_writeback(page);
840         }
841         page_cache_release(page);
842         EXIT;
843 }
844
845 /* the kernel calls us here when a page is unhashed from the page cache.
846  * the page will be locked and the kernel is holding a spinlock, so
847  * we need to be careful.  we're just tearing down our book-keeping
848  * here. */
849 void ll_removepage(struct page *page)
850 {
851         struct inode *inode = page->mapping->host;
852         struct obd_export *exp;
853         struct ll_async_page *llap;
854         struct ll_sb_info *sbi = ll_i2sbi(inode);
855         int rc;
856         ENTRY;
857
858         LASSERT(!in_interrupt());
859
860         /* sync pages or failed read pages can leave pages in the page
861          * cache that don't have our data associated with them anymore */
862         if (page_private(page) == 0) {
863                 EXIT;
864                 return;
865         }
866
867         LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
868
869         exp = ll_i2dtexp(inode);
870         if (exp == NULL) {
871                 CERROR("page %p ind %lu gave null export\n", page, page->index);
872                 EXIT;
873                 return;
874         }
875
876         llap = llap_from_page(page, 0);
877         if (IS_ERR(llap)) {
878                 CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
879                        page->index, PTR_ERR(llap));
880                 EXIT;
881                 return;
882         }
883
884         //llap_write_complete(inode, llap);
885         rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
886                                      llap->llap_cookie);
887         if (rc != 0)
888                 CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
889
890         /* this unconditional free is only safe because the page lock
891          * is providing exclusivity to memory pressure/truncate/writeback..*/
892         __clear_page_ll_data(page);
893
894         spin_lock(&sbi->ll_lock);
895         if (!list_empty(&llap->llap_pglist_item))
896                 list_del_init(&llap->llap_pglist_item);
897         sbi->ll_pglist_gen++;
898         sbi->ll_async_page_count--;
899         spin_unlock(&sbi->ll_lock);
900         OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
901         EXIT;
902 }
903
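/* check, without taking a lock reference (LDLM_FL_TEST_LOCK), whether the
 * page's file extent is covered by a granted PR or PW extent lock; files
 * held under a group lock always match. */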
904 static int ll_page_matches(struct page *page, int fd_flags)
905 {
906         struct lustre_handle match_lockh = {0};
907         struct inode *inode = page->mapping->host;
908         ldlm_policy_data_t page_extent;
909         int flags, matches;
910         ENTRY;
911
912         if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
913                 RETURN(1);
914
915         page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
916         page_extent.l_extent.end =
917                 page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
918         flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
919         if (!(fd_flags & LL_FILE_READAHEAD))
920                 flags |= LDLM_FL_CBPENDING;
921         matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
922                             ll_i2info(inode)->lli_smd, LDLM_EXTENT,
923                             &page_extent, LCK_PR | LCK_PW, &flags, inode,
924                             &match_lockh);
925         RETURN(matches);
926 }
927
928 static int ll_issue_page_read(struct obd_export *exp,
929                               struct ll_async_page *llap,
930                               struct obd_io_group *oig, int defer)
931 {
932         struct page *page = llap->llap_page;
933         int rc;
934
935         page_cache_get(page);
936         llap->llap_defer_uptodate = defer;
937         llap->llap_ra_used = 0;
938         rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
939                                 NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
940                                 PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY |
941                                               ASYNC_URGENT);
942         if (rc) {
943                 LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
944                 page_cache_release(page);
945         }
946         RETURN(rc);
947 }
948
949 static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
950 {
951         LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
952         ra->ra_stats[which]++;
953 }
954
955 static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
956 {
957         struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
958         struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
959
960         spin_lock(&sbi->ll_lock);
961         ll_ra_stats_inc_unlocked(ra, which);
962         spin_unlock(&sbi->ll_lock);
963 }
964
965 void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
966 {
967         if (!llap->llap_defer_uptodate || llap->llap_ra_used)
968                 return;
969
970         ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
971 }
972
973 #define RAS_CDEBUG(ras) \
974         CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
975                ras->ras_last_readpage, ras->ras_consecutive,            \
976                ras->ras_window_start, ras->ras_window_len,              \
977                ras->ras_next_readahead);
978
979 static int index_in_window(unsigned long index, unsigned long point,
980                            unsigned long before, unsigned long after)
981 {
982         unsigned long start = point - before, end = point + after;
983
984         if (start > point)
985                start = 0;
986         if (end < point)
987                end = ~0;
988
989         return start <= index && index <= end;
990 }
991
992 static struct ll_readahead_state *ll_ras_get(struct file *f)
993 {
994         struct ll_file_data       *fd;
995
996         fd = LUSTRE_FPRIVATE(f);
997         return &fd->fd_ras;
998 }
999
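/* a "read bead" records the region an in-flight read(2) is covering, per
 * reader thread; ll_readahead() consults the current thread's bead to extend
 * or clip its window against the region actually being read. */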
1000 void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
1001 {
1002         struct ll_readahead_state *ras;
1003
1004         ras = ll_ras_get(f);
1005         rar->lrr_reader = current;
1006
1007         spin_lock(&ras->ras_lock);
1008         list_add(&rar->lrr_linkage, &ras->ras_read_beads);
1009         spin_unlock(&ras->ras_lock);
1010 }
1011
1012 void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
1013 {
1014         struct ll_readahead_state *ras;
1015
1016         ras = ll_ras_get(f);
1017
1018         spin_lock(&ras->ras_lock);
1019         list_del_init(&rar->lrr_linkage);
1020         spin_unlock(&ras->ras_lock);
1021 }
1022
1023 static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
1024 {
1025         struct ll_ra_read *scan;
1026
1027         list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
1028                 if (scan->lrr_reader == current)
1029                         return scan;
1030         }
1031         return NULL;
1032 }
1033
1034 struct ll_ra_read *ll_ra_read_get(struct file *f)
1035 {
1036         struct ll_readahead_state *ras;
1037         struct ll_ra_read         *bead;
1038
1039         ras = ll_ras_get(f);
1040
1041         spin_lock(&ras->ras_lock);
1042         bead = ll_ra_read_get_locked(ras);
1043         spin_unlock(&ras->ras_lock);
1044         return bead;
1045 }
1046
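/* issue read-ahead within the window allowed by the ras state (and the
 * current read bead), clipped to the known size (kms).  Pages are grabbed
 * without waiting, checked for dlm lock coverage and queued on @oig as
 * deferred-uptodate reads; the walk stops early at the first page whose lock
 * match fails.  Returns the number of reads issued. */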
1047 static int ll_readahead(struct ll_readahead_state *ras,
1048                          struct obd_export *exp, struct address_space *mapping,
1049                          struct obd_io_group *oig, int flags)
1050 {
1051         unsigned long i, start = 0, end = 0, reserved;
1052         struct ll_async_page *llap;
1053         struct page *page;
1054         int rc, ret = 0, match_failed = 0;
1055         __u64 kms;
1056         unsigned int gfp_mask;
1057         struct inode *inode;
1058         struct lov_stripe_md *lsm;
1059         struct ll_ra_read *bead;
1060         struct ost_lvb lvb;
1061         ENTRY;
1062
1063         inode = mapping->host;
1064         lsm = ll_i2info(inode)->lli_smd;
1065
1066         lov_stripe_lock(lsm);
1067         inode_init_lvb(inode, &lvb);
1068         obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
1069         kms = lvb.lvb_size;
1070         lov_stripe_unlock(lsm);
1071         if (kms == 0) {
1072                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
1073                 RETURN(0);
1074         }
1075
1076         spin_lock(&ras->ras_lock);
1077         bead = ll_ra_read_get_locked(ras);
1078         /* reserve a part of the read-ahead window that we'll be issuing */
1079         if (ras->ras_window_len) {
1080                 start = ras->ras_next_readahead;
1081                 end = ras->ras_window_start + ras->ras_window_len - 1;
1082         }
1083         if (bead != NULL) {
1084                 pgoff_t read_end;
1085
1086                 start = max(start, bead->lrr_start);
1087                 read_end = bead->lrr_start + bead->lrr_count - 1;
1088                 if (ras->ras_consecutive > start - bead->lrr_start + 1)
1089                         /*
1090                          * if current read(2) is a part of larger sequential
1091                          * read, make sure read-ahead is at least to the end
1092                          * of the read region.
1093                          *
1094                          * XXX nikita: This doesn't work when some pages in
1095                          * [lrr_start, start] were cached (and, as a result,
1096                          * weren't counted in ->ras_consecutive).
1097                          */
1098                         end = max(end, read_end);
1099                 else
1100                         /*
1101                          * otherwise, clip read-ahead at the read boundary.
1102                          */
1103                         end = read_end;
1104         }
1105         if (end != 0) {
1106                 end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
1107                 ras->ras_next_readahead = max(end, end + 1);
1108                 RAS_CDEBUG(ras);
1109         }
1110         spin_unlock(&ras->ras_lock);
1111
1112         if (end == 0) {
1113                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
1114                 RETURN(0);
1115         }
1116
1117         reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
1118         if (reserved < end - start + 1)
1119                 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
1120
1121         gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
1122 #ifdef __GFP_NOWARN
1123         gfp_mask |= __GFP_NOWARN;
1124 #endif
1125
1126         for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
1127                 /* skip locked pages from previous readpage calls */
1128                 page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
1129                 if (page == NULL) {
1130                         ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
1131                         CDEBUG(D_READA, "g_c_p_n failed\n");
1132                         continue;
1133                 }
1134
1135                 /* Check if page was truncated or reclaimed */
1136                 if (page->mapping != mapping) {
1137                         ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
1138                         CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
1139                         goto next_page;
1140                 }
1141
1142                 /* we do this first so that we can see the page in the /proc
1143                  * accounting */
1144                 llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
1145                 if (IS_ERR(llap) || llap->llap_defer_uptodate)
1146                         goto next_page;
1147
1148                 /* skip completed pages */
1149                 if (Page_Uptodate(page))
1150                         goto next_page;
1151
1152                 /* bail when we hit the end of the lock. */
1153                 if ((rc = ll_page_matches(page, flags|LL_FILE_READAHEAD)) <= 0){
1154                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
1155                                        "lock match failed: rc %d\n", rc);
1156                         ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
1157                         match_failed = 1;
1158                         goto next_page;
1159                 }
1160
1161                 rc = ll_issue_page_read(exp, llap, oig, 1);
1162                 if (rc == 0) {
1163                         reserved--;
1164                         ret++;
1165                         LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
1166                                        "started read-ahead\n");
1167                 }
1168                 if (rc) {
1169         next_page:
1170                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
1171                                        "skipping read-ahead\n");
1172
1173                         unlock_page(page);
1174                 }
1175                 page_cache_release(page);
1176         }
1177
1178         LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
1179         if (reserved != 0)
1180                 ll_ra_count_put(ll_i2sbi(inode), reserved);
1181         if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
1182                 ll_ra_stats_inc(mapping, RA_STAT_EOF);
1183
1184         /* if we didn't get to the end of the region we reserved from
1185          * the ras we need to go back and update the ras so that the
1186          * next read-ahead tries from where we left off.  we only do so
1187          * if the region we failed to issue read-ahead on is still ahead
1188          * of the app and behind the next index to start read-ahead from */
1189         if (i != end + 1) {
1190                 spin_lock(&ras->ras_lock);
1191                 if (i < ras->ras_next_readahead &&
1192                     index_in_window(i, ras->ras_window_start, 0,
1193                                     ras->ras_window_len)) {
1194                         ras->ras_next_readahead = i;
1195                         RAS_CDEBUG(ras);
1196                 }
1197                 spin_unlock(&ras->ras_lock);
1198         }
1199
1200         RETURN(ret);
1201 }
1202
1203 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
1204 {
1205         ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
1206 }
1207
1208 /* called with the ras_lock held or from places where it doesn't matter */
1209 static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
1210 {
1211         ras->ras_last_readpage = index;
1212         ras->ras_consecutive = 1;
1213         ras->ras_window_len = 0;
1214         ras_set_start(ras, index);
1215         ras->ras_next_readahead = ras->ras_window_start;
1216
1217         RAS_CDEBUG(ras);
1218 }
1219
1220 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
1221 {
1222         spin_lock_init(&ras->ras_lock);
1223         ras_reset(ras, 0);
1224         INIT_LIST_HEAD(&ras->ras_read_beads);
1225 }
1226
1227 static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
1228                        unsigned long index, unsigned hit)
1229 {
1230         struct ll_ra_info *ra = &sbi->ll_ra_info;
1231         int zero = 0;
1232         ENTRY;
1233
1234         spin_lock(&sbi->ll_lock);
1235         spin_lock(&ras->ras_lock);
1236
1237         ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
1238
1239         /* reset the read-ahead window in two cases.  First, when the app seeks
1240          * or reads to some other part of the file.  Second, if we get a miss
1241          * on a page we think we previously issued read-ahead for.  This can
1242          * be a symptom of there being so many read-ahead pages that the VM is
1243          * reclaiming them before we get to them. */
1244         if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
1245                 zero = 1;
1246                 ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
1247         } else if (!hit && ras->ras_window_len &&
1248                    index < ras->ras_next_readahead &&
1249                    index_in_window(index, ras->ras_window_start, 0,
1250                                    ras->ras_window_len)) {
1251                 zero = 1;
1252                 ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
1253         }
1254
1255         if (zero) {
1256                 ras_reset(ras, index);
1257                 GOTO(out_unlock, 0);
1258         }
1259
1260         ras->ras_last_readpage = index;
1261         ras->ras_consecutive++;
1262         ras_set_start(ras, index);
1263         ras->ras_next_readahead = max(ras->ras_window_start,
1264                                       ras->ras_next_readahead);
1265
1266         /* wait for a few pages to arrive before issuing readahead to avoid
1267          * the worst overutilization */
1268         if (ras->ras_consecutive == 3) {
1269                 ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
1270                 GOTO(out_unlock, 0);
1271         }
1272
1273         /* we need to increase the window sometimes.  we'll arbitrarily
1274          * do it half-way through the pages in an rpc */
1275         if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
1276             (PTLRPC_MAX_BRW_PAGES >> 1)) {
1277                 ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
1278                 ras->ras_window_len = min(ras->ras_window_len,
1279                                           ra->ra_max_pages);
1280         }
1281
1282         EXIT;
1283 out_unlock:
1284         RAS_CDEBUG(ras);
1285         spin_unlock(&ras->ras_lock);
1286         spin_unlock(&sbi->ll_lock);
1287         return;
1288 }
1289
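/* writeback entry point for a single page: if the llap already has a queued
 * write just mark it urgent, otherwise queue it (or write it out
 * synchronously) now.  On error the page is re-dirtied so the write will be
 * retried. */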
1290 int ll_writepage(struct page *page)
1291 {
1292         struct inode *inode = page->mapping->host;
1293         struct ll_inode_info *lli = ll_i2info(inode);
1294         struct obd_export *exp;
1295         struct ll_async_page *llap;
1296         int rc = 0;
1297         ENTRY;
1298
1299         LASSERT(!PageDirty(page));
1300         LASSERT(PageLocked(page));
1301
1302         exp = ll_i2dtexp(inode);
1303         if (exp == NULL)
1304                 GOTO(out, rc = -EINVAL);
1305
1306         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
1307         if (IS_ERR(llap))
1308                 GOTO(out, rc = PTR_ERR(llap));
1309
1310         page_cache_get(page);
1311         if (llap->llap_write_queued) {
1312                 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
1313                 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
1314                                          llap->llap_cookie,
1315                                          ASYNC_READY | ASYNC_URGENT);
1316         } else {
1317                 rc = queue_or_sync_write(exp, inode, llap, PAGE_SIZE,
1318                                          ASYNC_READY | ASYNC_URGENT);
1319         }
1320         if (rc)
1321                 page_cache_release(page);
1322 out:
1323         if (rc) {
1324                 if (!lli->lli_async_rc)
1325                         lli->lli_async_rc = rc;
1326                 /* re-dirty page on error so it retries write */
1327                 ll_redirty_page(page);
1328                 unlock_page(page);
1329         }
1330         RETURN(rc);
1331 }
1332
1333 /*
1334  * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
1335  * read-ahead assumes it is valid to issue readpage all the way up to
1336  * i_size, but our dlm locks make that not the case.  We disable the
1337  * kernel's read-ahead and do our own by walking ahead in the page cache
1338  * checking for dlm lock coverage.  the main difference between 2.4 and
1339  * 2.6 is how read-ahead gets batched and issued, but we're using our own,
1340  * so they look the same.
1341  */
1342 int ll_readpage(struct file *filp, struct page *page)
1343 {
1344         struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
1345         struct inode *inode = page->mapping->host;
1346         struct obd_export *exp;
1347         struct ll_async_page *llap;
1348         struct obd_io_group *oig = NULL;
1349         int rc;
1350         ENTRY;
1351
1352         LASSERT(PageLocked(page));
1353         LASSERT(!PageUptodate(page));
1354         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
1355                inode->i_ino, inode->i_generation, inode,
1356                (((loff_t)page->index) << PAGE_SHIFT),
1357                (((loff_t)page->index) << PAGE_SHIFT));
1358         LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
1359
1360         rc = oig_init(&oig);
1361         if (rc < 0)
1362                 GOTO(out, rc);
1363
1364         exp = ll_i2dtexp(inode);
1365         if (exp == NULL)
1366                 GOTO(out, rc = -EINVAL);
1367
1368         llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
1369         if (IS_ERR(llap))
1370                 GOTO(out, rc = PTR_ERR(llap));
1371
1372         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1373                 ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
1374                            llap->llap_defer_uptodate);
1375
1376         if (llap->llap_defer_uptodate) {
1377                 llap->llap_ra_used = 1;
1378                 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1379                                   fd->fd_flags);
1380                 if (rc > 0)
1381                         obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
1382                                              NULL, oig);
1383                 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
1384                 SetPageUptodate(page);
1385                 unlock_page(page);
1386                 GOTO(out_oig, rc = 0);
1387         }
1388
1389         if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
1390                 rc = ll_page_matches(page, fd->fd_flags);
1391                 if (rc < 0) {
1392                         LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
1393                         GOTO(out, rc);
1394                 }
1395
1396                 if (rc == 0) {
1397                         CWARN("ino %lu page %lu (%llu) not covered by "
1398                               "a lock (mmap?).  check debug logs.\n",
1399                               inode->i_ino, page->index,
1400                               (long long)page->index << PAGE_CACHE_SHIFT);
1401                 }
1402         }
1403
1404         rc = ll_issue_page_read(exp, llap, oig, 0);
1405         if (rc)
1406                 GOTO(out, rc);
1407
1408         LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
1409         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1410                 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1411                              fd->fd_flags);
1412
1413         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1414
1415 out:
1416         if (rc)
1417                 unlock_page(page);
1418 out_oig:
1419         if (oig != NULL)
1420                 oig_release(oig);
1421         RETURN(rc);
1422 }