Branch b1_6
[fs/lustre-release.git] / lustre / llite / rw.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Lite I/O page cache routines shared by different kernel revs
5  *
6  *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23 #ifdef HAVE_KERNEL_CONFIG_H
24 #include <linux/config.h>
25 #endif
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/string.h>
29 #include <linux/stat.h>
30 #include <linux/errno.h>
31 #include <linux/smp_lock.h>
32 #include <linux/unistd.h>
33 #include <linux/version.h>
34 #include <asm/system.h>
35 #include <asm/uaccess.h>
36
37 #include <linux/fs.h>
38 #include <linux/stat.h>
39 #include <asm/uaccess.h>
40 #include <asm/segment.h>
41 #include <linux/mm.h>
42 #include <linux/pagemap.h>
43 #include <linux/smp_lock.h>
44
45 #define DEBUG_SUBSYSTEM S_LLITE
46
47 #include <lustre_lite.h>
48 #include "llite_internal.h"
49 #include <linux/lustre_compat25.h>
50
51 #ifndef list_for_each_prev_safe
52 #define list_for_each_prev_safe(pos, n, head) \
53         for (pos = (head)->prev, n = pos->prev; pos != (head); \
54                 pos = n, n = pos->prev )
55 #endif
56
57 cfs_mem_cache_t *ll_async_page_slab = NULL;
58 size_t ll_async_page_slab_size = 0;
59
60 /* SYNCHRONOUS I/O to object storage for an inode */
61 static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
62                   struct page *page, int flags)
63 {
64         struct ll_inode_info *lli = ll_i2info(inode);
65         struct lov_stripe_md *lsm = lli->lli_smd;
66         struct obd_info oinfo = { { { 0 } } };
67         struct brw_page pg;
68         int rc;
69         ENTRY;
70
71         pg.pg = page;
72         pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;
73
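        /* A write that ends inside the last page of the file only sends the
         * bytes up to i_size.  E.g. assuming 4096-byte pages and
         * i_size = 10000, the page at index 2 covers [8192, 12288) and
         * pg.count = 10000 % 4096 = 1808. */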
74         if ((cmd & OBD_BRW_WRITE) && (pg.off + CFS_PAGE_SIZE > inode->i_size))
75                 pg.count = inode->i_size % CFS_PAGE_SIZE;
76         else
77                 pg.count = CFS_PAGE_SIZE;
78
79         LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
80                        cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
81                        inode->i_ino, pg.off, pg.off);
82         if (pg.count == 0) {
83                 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
84                        LPU64"\n",
85                        inode->i_ino, inode, inode->i_size, page->mapping->host,
86                        page->mapping->host->i_size, page->index, pg.off);
87         }
88
89         pg.flag = flags;
90
91         if (cmd & OBD_BRW_WRITE)
92                 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
93                                    pg.count);
94         else
95                 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
96                            pg.count);
97         oinfo.oi_oa = oa;
98         oinfo.oi_md = lsm;
99         rc = obd_brw(cmd, ll_i2obdexp(inode), &oinfo, 1, &pg, NULL);
100         if (rc == 0)
101                 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
102         else if (rc != -EIO)
103                 CERROR("error from obd_brw: rc = %d\n", rc);
104         RETURN(rc);
105 }
106
107 /* This isn't where truncate starts.  Roughly:
108  * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.  setattr_raw grabs a
109  * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
110  * avoid races.
111  *
112  * Must be called under ->lli_size_sem. */
113 void ll_truncate(struct inode *inode)
114 {
115         struct ll_inode_info *lli = ll_i2info(inode);
116         struct obd_info oinfo = { { { 0 } } };
117         struct ost_lvb lvb;
118         struct obdo oa;
119         int rc;
120         ENTRY;
121         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
122                inode->i_generation, inode, inode->i_size, inode->i_size);
123
124         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
125         if (lli->lli_size_sem_owner != current) {
126                 EXIT;
127                 return;
128         }
129
130         if (!lli->lli_smd) {
131                 CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
132                        inode->i_ino);
133                 GOTO(out_unlock, 0);
134         }
135
136         LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
137
138         /* XXX I'm pretty sure this is a hack to paper over a more fundamental
139          * race condition. */
140         lov_stripe_lock(lli->lli_smd);
141         inode_init_lvb(inode, &lvb);
142         obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 0);
143         if (lvb.lvb_size == inode->i_size) {
144                 CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
145                        lli->lli_smd->lsm_object_id,inode->i_size,inode->i_size);
146                 lov_stripe_unlock(lli->lli_smd);
147                 GOTO(out_unlock, 0);
148         }
149
150         obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd, inode->i_size, 1);
151         lov_stripe_unlock(lli->lli_smd);
152
153         if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
154                      (inode->i_size & ~CFS_PAGE_MASK))) {
155                 /* If the truncate leaves behind a partial page, update its
156                  * checksum. */
157                 struct page *page = find_get_page(inode->i_mapping,
158                                                   inode->i_size >> CFS_PAGE_SHIFT);
159                 if (page != NULL) {
160                         struct ll_async_page *llap = llap_cast_private(page);
161                         if (llap != NULL) {
162                                 llap->llap_checksum =
163                                         crc32_le(0, kmap(page), CFS_PAGE_SIZE);
164                                 kunmap(page);
165                         }
166                         page_cache_release(page);
167                 }
168         }
169
170         CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
171                lli->lli_smd->lsm_object_id, inode->i_size, inode->i_size);
172
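        /* Ask the OSTs to punch (deallocate) the byte range from the new
         * i_size through the end of each stripe object, discarding any data
         * past the new size. */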
173         oinfo.oi_md = lli->lli_smd;
174         oinfo.oi_policy.l_extent.start = inode->i_size;
175         oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
176         oinfo.oi_oa = &oa;
177         oa.o_id = lli->lli_smd->lsm_object_id;
178         oa.o_valid = OBD_MD_FLID;
179
180         obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |OBD_MD_FLFID|
181                         OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
182                         OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLGENER | 
183                         OBD_MD_FLBLOCKS);
184
185         ll_inode_size_unlock(inode, 0);
186
187         rc = obd_punch_rqset(ll_i2obdexp(inode), &oinfo, NULL);
188         if (rc)
189                 CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
190         else
191                 obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
192                               OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
193         EXIT;
194         return;
195
196  out_unlock:
197         ll_inode_size_unlock(inode, 0);
198 } /* ll_truncate */
199
200 int ll_prepare_write(struct file *file, struct page *page, unsigned from,
201                      unsigned to)
202 {
203         struct inode *inode = page->mapping->host;
204         struct ll_inode_info *lli = ll_i2info(inode);
205         struct lov_stripe_md *lsm = lli->lli_smd;
206         obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
207         struct obd_info oinfo = { { { 0 } } };
208         struct brw_page pga;
209         struct obdo oa;
210         struct ost_lvb lvb;
211         int rc = 0;
212         ENTRY;
213
214         LASSERT(PageLocked(page));
215         (void)llap_cast_private(page); /* assertion */
216
217         /* Check to see if we should return -EIO right away */
218         pga.pg = page;
219         pga.off = offset;
220         pga.count = CFS_PAGE_SIZE;
221         pga.flag = 0;
222
223         oa.o_mode = inode->i_mode;
224         oa.o_id = lsm->lsm_object_id;
225         oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
226         obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);
227
228         oinfo.oi_oa = &oa;
229         oinfo.oi_md = lsm;
230         rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oinfo, 1, &pga, NULL);
231         if (rc)
232                 RETURN(rc);
233
234         if (PageUptodate(page)) {
235                 LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
236                 RETURN(0);
237         }
238
239         /* We're completely overwriting an existing page, so _don't_ set it up
240          * to date until commit_write */
241         if (from == 0 && to == CFS_PAGE_SIZE) {
242                 LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
243                 POISON_PAGE(page, 0x11);
244                 RETURN(0);
245         }
246
247         /* If we are writing to a new page, no need to read old data.  The
248          * extent locking will have updated the KMS, and for our purposes here
249          * we can treat it like i_size. */
250         lov_stripe_lock(lsm);
251         inode_init_lvb(inode, &lvb);
252         obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
253         lov_stripe_unlock(lsm);
254         if (lvb.lvb_size <= offset) {
255                 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
256                                lvb.lvb_size, offset);
257                 memset(kmap(page), 0, CFS_PAGE_SIZE);
258                 kunmap(page);
259                 GOTO(prepare_done, rc = 0);
260         }
261
262         /* XXX could be an async ocp read.. read-ahead? */
263         rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
264         if (rc == 0) {
265                 /* bug 1598: don't clobber blksize */
266                 oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
267                 obdo_refresh_inode(inode, &oa, oa.o_valid);
268         }
269
270         EXIT;
271  prepare_done:
272         if (rc == 0)
273                 SetPageUptodate(page);
274
275         return rc;
276 }
277
278 static int ll_ap_make_ready(void *data, int cmd)
279 {
280         struct ll_async_page *llap;
281         struct page *page;
282         ENTRY;
283
284         llap = LLAP_FROM_COOKIE(data);
285         page = llap->llap_page;
286
287         LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n",
288                  cmd, page, page->mapping->host->i_ino, page->index);
289
290         /* we're trying to write, but the page is locked.. come back later */
291         if (TryLockPage(page))
292                 RETURN(-EAGAIN);
293
294         LASSERT(!PageWriteback(page));
295
296         /* If we left PageDirty set we might get another writepage call
297          * in the future.  List walkers are bright enough
298          * to check the page dirty bit, so we can leave it on whatever list
299          * it's on.  XXX also, we're called with the cli lock held, so if
300          * we took the page cache lock we'd create a lock inversion
301          * with the removepage path, which takes the page lock and then the
302          * cli lock. */
303 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
304         clear_page_dirty(page);
305 #else
306         LASSERTF(!PageWriteback(page), "cmd %x page %p ino %lu index %lu\n",
307                  cmd, page, page->mapping->host->i_ino, page->index);
308         clear_page_dirty_for_io(page);
309
310         /* This actually clears the dirty bit in the radix tree. */
311         set_page_writeback(page);
312 #endif
313
314         LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
315         page_cache_get(page);
316
317         RETURN(0);
318 }
319
320 /* We have two reasons for giving llite the opportunity to change the
321  * write length of a given queued page as it builds the RPC containing
322  * the page:
323  *
324  * 1) Further extending writes may have landed in the page cache
325  *    since a partial write first queued this page requiring us
326  *    to write more from the page cache.  (No further races are possible, since
327  *    by the time this is called, the page is locked.)
328  * 2) We might have raced with truncate and want to avoid performing
329  *    write RPCs that are just going to be thrown away by the
330  *    truncate's punch on the storage targets.
331  *
332  * The kms serves these purposes as it is set at both truncate and extending
333  * writes.
334  */
335 static int ll_ap_refresh_count(void *data, int cmd)
336 {
337         struct ll_inode_info *lli;
338         struct ll_async_page *llap;
339         struct lov_stripe_md *lsm;
340         struct page *page;
341         struct inode *inode;
342         struct ost_lvb lvb;
343         __u64 kms;
344         ENTRY;
345
346         /* readpage queues with _COUNT_STABLE, shouldn't get here. */
347         LASSERT(cmd != OBD_BRW_READ);
348
349         llap = LLAP_FROM_COOKIE(data);
350         page = llap->llap_page;
351         inode = page->mapping->host;
352         lli = ll_i2info(inode);
353         lsm = lli->lli_smd;
354
355         lov_stripe_lock(lsm);
356         inode_init_lvb(inode, &lvb);
357         obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
358         kms = lvb.lvb_size;
359         lov_stripe_unlock(lsm);
360
361         /* catch race with truncate */
362         if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
363                 return 0;
364
365         /* catch sub-page write at end of file */
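        /* E.g. assuming 4096-byte pages and kms = 10000, the page at index 2
         * holds only 10000 - 8192 = 1808 valid bytes, i.e. kms % CFS_PAGE_SIZE. */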
366         if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
367                 return kms % CFS_PAGE_SIZE;
368
369         return CFS_PAGE_SIZE;
370 }
371
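/* Fill @oa with the inode attributes the OST needs for this I/O: the object
 * id plus type/atime for reads; writes additionally carry the IO epoch,
 * mtime/ctime, ownership and fid/generation so the OST can update them. */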
372 void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
373 {
374         struct lov_stripe_md *lsm;
375         obd_flag valid_flags;
376
377         lsm = ll_i2info(inode)->lli_smd;
378
379         oa->o_id = lsm->lsm_object_id;
380         oa->o_valid = OBD_MD_FLID;
381         valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
382         if (cmd & OBD_BRW_WRITE) {
383                 oa->o_valid |= OBD_MD_FLEPOCH;
384                 oa->o_easize = ll_i2info(inode)->lli_io_epoch;
385
386                 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
387                         OBD_MD_FLUID | OBD_MD_FLGID |
388                         OBD_MD_FLFID | OBD_MD_FLGENER;
389         }
390
391         obdo_from_inode(oa, inode, valid_flags);
392 }
393
394 static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
395 {
396         struct ll_async_page *llap;
397         ENTRY;
398
399         llap = LLAP_FROM_COOKIE(data);
400         ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
401
402         EXIT;
403 }
404
405 static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
406                               obd_valid valid)
407 {
408         struct ll_async_page *llap;
409         ENTRY;
410
411         llap = LLAP_FROM_COOKIE(data);
412         obdo_from_inode(oa, llap->llap_page->mapping->host, valid);
413
414         EXIT;
415 }
416
417 static struct obd_async_page_ops ll_async_page_ops = {
418         .ap_make_ready =        ll_ap_make_ready,
419         .ap_refresh_count =     ll_ap_refresh_count,
420         .ap_fill_obdo =         ll_ap_fill_obdo,
421         .ap_update_obdo =       ll_ap_update_obdo,
422         .ap_completion =        ll_ap_completion,
423 };
424
425 struct ll_async_page *llap_cast_private(struct page *page)
426 {
427         struct ll_async_page *llap = (struct ll_async_page *)page_private(page);
428
429         LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
430                  "page %p private %lu gave magic %d which != %d\n",
431                  page, page_private(page), llap->llap_magic, LLAP_MAGIC);
432
433         return llap;
434 }
435
436 /* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
437  *
438  * There is an llap attached to every page in lustre, linked off @sbi.
439  * We add a dummy llap to the list so we don't lose our place during list
440  * walking.  If llaps in the list are being moved they will only move to the
441  * end of the LRU, and we aren't terribly interested in those pages here (we
442  * start at the beginning of the list where the least-used llaps are).
443  */
444 int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
445 {
446         struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
447         unsigned long total, want, count = 0;
448
449         total = sbi->ll_async_page_count;
450
451         /* There can be a large number of llaps (600k or more in a large
452          * memory machine) so the VM 1/6 shrink ratio is likely too much.
453          * Since we are freeing pages also, we don't necessarily want to
454          * shrink so much.  Limit to 40MB of pages + llaps per call. */
455         if (shrink_fraction == 0)
456                 want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
457         else
458                 want = (total + shrink_fraction - 1) / shrink_fraction;
459
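        /* Cap the work done in one call: assuming 4096-byte pages this is
         * 40 << (20 - 12) = 10240 pages, i.e. 40MB. */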
460         if (want > 40 << (20 - CFS_PAGE_SHIFT))
461                 want = 40 << (20 - CFS_PAGE_SHIFT);
462
463         CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
464                want, total, shrink_fraction);
465
466         spin_lock(&sbi->ll_lock);
467         list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
468
469         while (--total >= 0 && count < want) {
470                 struct page *page;
471                 int keep;
472
473                 if (unlikely(need_resched())) {
474                         spin_unlock(&sbi->ll_lock);
475                         cond_resched();
476                         spin_lock(&sbi->ll_lock);
477                 }
478
479                 llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
480                 list_del_init(&dummy_llap.llap_pglist_item);
481                 if (llap == NULL)
482                         break;
483
484                 page = llap->llap_page;
485                 LASSERT(page != NULL);
486
487                 list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
488
489                 /* Page needs/undergoing IO */
490                 if (TryLockPage(page)) {
491                         LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
492                         continue;
493                 }
494
495                 keep = (llap->llap_write_queued || PageDirty(page) ||
496                         PageWriteback(page) || (!PageUptodate(page) &&
497                         llap->llap_origin != LLAP_ORIGIN_READAHEAD));
498
499                 LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
500                                keep ? "keep" : "drop",
501                                llap->llap_write_queued ? "wq " : "",
502                                PageDirty(page) ? "pd " : "",
503                                PageUptodate(page) ? "" : "!pu ",
504                                PageWriteback(page) ? "wb" : "",
505                                llap->llap_defer_uptodate ? "" : "!du",
506                                llap_origins[llap->llap_origin]);
507
508                 /* If page is dirty or undergoing IO don't discard it */
509                 if (keep) {
510                         unlock_page(page);
511                         continue;
512                 }
513
514                 page_cache_get(page);
515                 spin_unlock(&sbi->ll_lock);
516
517                 if (page->mapping != NULL) {
518                         ll_teardown_mmaps(page->mapping,
519                                          (__u64)page->index << CFS_PAGE_SHIFT,
520                                          ((__u64)page->index << CFS_PAGE_SHIFT)|
521                                           ~CFS_PAGE_MASK);
522                         if (!PageDirty(page) && !page_mapped(page)) {
523                                 ll_ra_accounting(llap, page->mapping);
524                                 ll_truncate_complete_page(page);
525                                 ++count;
526                         } else {
527                                 LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
528                                                              " because it is "
529                                                              "%s\n",
530                                                               PageDirty(page)?
531                                                               "dirty":"mapped");
532                         }
533                 }
534                 unlock_page(page);
535                 page_cache_release(page);
536
537                 spin_lock(&sbi->ll_lock);
538         }
539         list_del(&dummy_llap.llap_pglist_item);
540         spin_unlock(&sbi->ll_lock);
541
542         CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
543                count, want, total);
544
545         return count;
546 }
547
548 static struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
549 {
550         struct ll_async_page *llap;
551         struct obd_export *exp;
552         struct inode *inode = page->mapping->host;
553         struct ll_sb_info *sbi;
554         int rc;
555         ENTRY;
556
557         if (!inode) {
558                 static int triggered;
559
560                 if (!triggered) {
561                         LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
562                                        "page received\n");
563                         libcfs_debug_dumpstack(NULL);
564                         triggered = 1;
565                 }
566                 RETURN(ERR_PTR(-EINVAL));
567         }
568         sbi = ll_i2sbi(inode);
569         LASSERT(ll_async_page_slab);
570         LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
571
572         llap = llap_cast_private(page);
573         if (llap != NULL) {
574                 /* move to end of LRU list, except when page is just about to
575                  * die */
576                 if (origin != LLAP_ORIGIN_REMOVEPAGE) {
577                         spin_lock(&sbi->ll_lock);
578                         sbi->ll_pglist_gen++;
579                         list_del_init(&llap->llap_pglist_item);
580                         list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
581                         spin_unlock(&sbi->ll_lock);
582                 }
583                 GOTO(out, llap);
584         }
585
586         exp = ll_i2obdexp(page->mapping->host);
587         if (exp == NULL)
588                 RETURN(ERR_PTR(-EINVAL));
589
590         /* limit the number of lustre-cached pages */
591         if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
592                 llap_shrink_cache(sbi, 0);
593
594         OBD_SLAB_ALLOC(llap, ll_async_page_slab, GFP_KERNEL,
595                        ll_async_page_slab_size);
596         if (llap == NULL)
597                 RETURN(ERR_PTR(-ENOMEM));
598         llap->llap_magic = LLAP_MAGIC;
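        /* The slab object is sized (ll_async_page_slab_size) to hold the llap
         * followed by the obd layer's per-page cookie, so point llap_cookie
         * just past the rounded llap. */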
599         llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
600
601         rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
602                                  (obd_off)page->index << CFS_PAGE_SHIFT,
603                                  &ll_async_page_ops, llap, &llap->llap_cookie);
604         if (rc) {
605                 OBD_SLAB_FREE(llap, ll_async_page_slab,
606                               ll_async_page_slab_size);
607                 RETURN(ERR_PTR(rc));
608         }
609
610         CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
611                page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
612         /* also zeroing the PRIVBITS low order bitflags */
613         __set_page_ll_data(page, llap);
614         llap->llap_page = page;
615
616         spin_lock(&sbi->ll_lock);
617         sbi->ll_pglist_gen++;
618         sbi->ll_async_page_count++;
619         list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
620         spin_unlock(&sbi->ll_lock);
621
622  out:
623         if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
624                 __u32 csum = 0;
625                 csum = crc32_le(csum, kmap(page), CFS_PAGE_SIZE);
626                 kunmap(page);
627                 if (origin == LLAP_ORIGIN_READAHEAD ||
628                     origin == LLAP_ORIGIN_READPAGE) {
629                         llap->llap_checksum = 0;
630                 } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
631                            llap->llap_checksum == 0) {
632                         llap->llap_checksum = csum;
633                         CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
634                 } else if (llap->llap_checksum == csum) {
635                         /* origin == LLAP_ORIGIN_WRITEPAGE */
636                         CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
637                                page, csum);
638                 } else {
639                         /* origin == LLAP_ORIGIN_WRITEPAGE */
640                         LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
641                                        "%x!\n", llap->llap_checksum, csum);
642                 }
643         }
644
645         llap->llap_origin = origin;
646         RETURN(llap);
647 }
648
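/* Try to queue an asynchronous write for this page.  If the async queue
 * refuses it, fall back to a synchronous group write: build an obd_io_group,
 * queue the page as an urgent group IO, trigger it and wait for completion
 * before returning. */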
649 static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
650                                struct ll_async_page *llap,
651                                unsigned to, obd_flag async_flags)
652 {
653         unsigned long size_index = inode->i_size >> CFS_PAGE_SHIFT;
654         struct obd_io_group *oig;
655         struct ll_sb_info *sbi = ll_i2sbi(inode);
656         int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
657         ENTRY;
658
659         /* _make_ready only sees llap once we've unlocked the page */
660         llap->llap_write_queued = 1;
661         rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
662                                 llap->llap_cookie, OBD_BRW_WRITE | noquot,
663                                 0, 0, 0, async_flags);
664         if (rc == 0) {
665                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
666                 llap_write_pending(inode, llap);
667                 GOTO(out, 0);
668         }
669
670         llap->llap_write_queued = 0;
671
672         rc = oig_init(&oig);
673         if (rc)
674                 GOTO(out, rc);
675
676         /* make full-page requests if we are not at EOF (bug 4410) */
677         if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
678                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
679                                "sync write before EOF: size_index %lu, to %d\n",
680                                size_index, to);
681                 to = CFS_PAGE_SIZE;
682         } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
683                 int size_to = inode->i_size & ~CFS_PAGE_MASK;
684                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
685                                "sync write at EOF: size_index %lu, to %d/%d\n",
686                                size_index, to, size_to);
687                 if (to < size_to)
688                         to = size_to;
689         }
690
691         /* compare the checksum once before the page leaves llite */
692         if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
693                      llap->llap_checksum != 0)) {
694                 __u32 csum = 0;
695                 struct page *page = llap->llap_page;
696                 csum = crc32_le(csum, kmap(page), CFS_PAGE_SIZE);
697                 kunmap(page);
698                 if (llap->llap_checksum == csum) {
699                         CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
700                                page, csum);
701                 } else {
702                         CERROR("page %p old cksum %x != new cksum %x!\n",
703                                page, llap->llap_checksum, csum);
704                 }
705         }
706
707         rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
708                                 llap->llap_cookie, OBD_BRW_WRITE | noquot,
709                                 0, to, 0, ASYNC_READY | ASYNC_URGENT |
710                                 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
711         if (rc)
712                 GOTO(free_oig, rc);
713
714         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
715         if (rc)
716                 GOTO(free_oig, rc);
717
718         rc = oig_wait(oig);
719
720         if (!rc && async_flags & ASYNC_READY) {
721                 unlock_page(llap->llap_page);
722                 if (PageWriteback(llap->llap_page)) {
723                         end_page_writeback(llap->llap_page);
724                 }
725         }
726
727         LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
728
729 free_oig:
730         oig_release(oig);
731 out:
732         RETURN(rc);
733 }
734
735 /* update our write count to account for i_size increases that may have
736  * happened since we've queued the page for io. */
737
738 /* be careful not to return success without setting the page Uptodate or
739  * the next pass through prepare_write will read in stale data from disk. */
740 int ll_commit_write(struct file *file, struct page *page, unsigned from,
741                     unsigned to)
742 {
743         struct inode *inode = page->mapping->host;
744         struct ll_inode_info *lli = ll_i2info(inode);
745         struct lov_stripe_md *lsm = lli->lli_smd;
746         struct obd_export *exp;
747         struct ll_async_page *llap;
748         loff_t size;
749         int rc = 0;
750         ENTRY;
751
752         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
753         LASSERT(inode == file->f_dentry->d_inode);
754         LASSERT(PageLocked(page));
755
756         CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
757                inode, page, from, to, page->index);
758
759         llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
760         if (IS_ERR(llap))
761                 RETURN(PTR_ERR(llap));
762
763         exp = ll_i2obdexp(inode);
764         if (exp == NULL)
765                 RETURN(-EINVAL);
766
767         llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);
768
769         /* queue a write for some time in the future the first time we
770          * dirty the page */
771         if (!PageDirty(page)) {
772                 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);
773
774                 rc = queue_or_sync_write(exp, inode, llap, to, 0);
775                 if (rc)
776                         GOTO(out, rc);
777         } else {
778                 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
779         }
780
781         /* put the page in the page cache; from now on ll_removepage is
782          * responsible for cleaning up the llap.
783          * Only set the page dirty when it's queued to be written out. */
784         if (llap->llap_write_queued)
785                 set_page_dirty(page);
786
787 out:
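        /* On success, raise the known minimum size (kms) and i_size to cover
         * the bytes just written; 'size' is the file offset one past the last
         * byte written into this page. */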
788         size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
789         ll_inode_size_lock(inode, 0);
790         if (rc == 0) {
791                 lov_stripe_lock(lsm);
792                 obd_adjust_kms(exp, lsm, size, 0);
793                 lov_stripe_unlock(lsm);
794                 if (size > inode->i_size)
795                         inode->i_size = size;
796                 SetPageUptodate(page);
797         } else if (size > inode->i_size) {
798                 /* this page is beyond i_size, so it can't be
799                  * truncated in ll_p_r_e during lock revocation.  We must
800                  * tear down our book-keeping here. */
801                 ll_removepage(page);
802         }
803         ll_inode_size_unlock(inode, 0);
804         RETURN(rc);
805 }
806
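/* Reserve up to @len read-ahead pages against the per-filesystem limit
 * ra_max_pages and return how many were actually granted; ll_ra_count_put()
 * gives them back. */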
807 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
808 {
809         struct ll_ra_info *ra = &sbi->ll_ra_info;
810         unsigned long ret;
811         ENTRY;
812
813         spin_lock(&sbi->ll_lock);
814         ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
815         ra->ra_cur_pages += ret;
816         spin_unlock(&sbi->ll_lock);
817
818         RETURN(ret);
819 }
820
821 static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
822 {
823         struct ll_ra_info *ra = &sbi->ll_ra_info;
824         spin_lock(&sbi->ll_lock);
825         LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
826                  ra->ra_cur_pages, len);
827         ra->ra_cur_pages -= len;
828         spin_unlock(&sbi->ll_lock);
829 }
830
831 /* Called for each page in a completed RPC. */
832 int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
833 {
834         struct ll_async_page *llap;
835         struct page *page;
836         int ret = 0;
837         ENTRY;
838
839         llap = LLAP_FROM_COOKIE(data);
840         page = llap->llap_page;
841         LASSERT(PageLocked(page));
842         LASSERT(CheckWriteback(page,cmd));
843
844         LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
845
846         if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
847                 ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
848
849         if (rc == 0)  {
850                 if (cmd & OBD_BRW_READ) {
851                         if (!llap->llap_defer_uptodate)
852                                 SetPageUptodate(page);
853                 } else {
854                         llap->llap_write_queued = 0;
855                 }
856                 ClearPageError(page);
857         } else {
858                 if (cmd & OBD_BRW_READ) {
859                         llap->llap_defer_uptodate = 0;
860                 }
861                 SetPageError(page);
862         }
863
864         unlock_page(page);
865
866         if (cmd & OBD_BRW_WRITE) {
867                 llap_write_complete(page->mapping->host, llap);
868                 ll_try_done_writing(page->mapping->host);
869         }
870
871         if (PageWriteback(page)) {
872                 end_page_writeback(page);
873         }
874         page_cache_release(page);
875
876         RETURN(ret);
877 }
878
879 /* the kernel calls us here when a page is unhashed from the page cache.
880  * the page will be locked and the kernel is holding a spinlock, so
881  * we need to be careful.  we're just tearing down our book-keeping
882  * here. */
883 void ll_removepage(struct page *page)
884 {
885         struct inode *inode = page->mapping->host;
886         struct obd_export *exp;
887         struct ll_async_page *llap;
888         struct ll_sb_info *sbi = ll_i2sbi(inode);
889         int rc;
890         ENTRY;
891
892         LASSERT(!in_interrupt());
893
894         /* sync pages or failed read pages can leave pages in the page
895          * cache that don't have our data associated with them anymore */
896         if (page_private(page) == 0) {
897                 EXIT;
898                 return;
899         }
900
901         LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
902
903         exp = ll_i2obdexp(inode);
904         if (exp == NULL) {
905                 CERROR("page %p ind %lu gave null export\n", page, page->index);
906                 EXIT;
907                 return;
908         }
909
910         llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
911         if (IS_ERR(llap)) {
912                 CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
913                        page->index, PTR_ERR(llap));
914                 EXIT;
915                 return;
916         }
917
918         //llap_write_complete(inode, llap);
919         rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
920                                      llap->llap_cookie);
921         if (rc != 0)
922                 CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
923
924         /* this unconditional free is only safe because the page lock
925          * is providing exclusivity to memory pressure/truncate/writeback. */
926         __clear_page_ll_data(page);
927
928         spin_lock(&sbi->ll_lock);
929         if (!list_empty(&llap->llap_pglist_item))
930                 list_del_init(&llap->llap_pglist_item);
931         sbi->ll_pglist_gen++;
932         sbi->ll_async_page_count--;
933         spin_unlock(&sbi->ll_lock);
934         OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
935         EXIT;
936 }
937
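/* Check whether this client already holds a granted PR or PW extent lock
 * covering the page (a test-only match; files under a group lock always
 * match). */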
938 static int ll_page_matches(struct page *page, int fd_flags)
939 {
940         struct lustre_handle match_lockh = {0};
941         struct inode *inode = page->mapping->host;
942         ldlm_policy_data_t page_extent;
943         int flags, matches;
944         ENTRY;
945
946         if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
947                 RETURN(1);
948
949         page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
950         page_extent.l_extent.end =
951                 page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
952         flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
953         if (!(fd_flags & LL_FILE_READAHEAD))
954                 flags |= LDLM_FL_CBPENDING;
955         matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
956                             ll_i2info(inode)->lli_smd, LDLM_EXTENT,
957                             &page_extent, LCK_PR | LCK_PW, &flags, inode,
958                             &match_lockh);
959         RETURN(matches);
960 }
961
962 static int ll_issue_page_read(struct obd_export *exp,
963                               struct ll_async_page *llap,
964                               struct obd_io_group *oig, int defer)
965 {
966         struct page *page = llap->llap_page;
967         int rc;
968
969         page_cache_get(page);
970         llap->llap_defer_uptodate = defer;
971         llap->llap_ra_used = 0;
972         rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
973                                 NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
974                                 CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY |
975                                               ASYNC_URGENT);
976         if (rc) {
977                 LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
978                 page_cache_release(page);
979         }
980         RETURN(rc);
981 }
982
983 static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
984 {
985         LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
986         ra->ra_stats[which]++;
987 }
988
989 static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
990 {
991         struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
992         struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
993
994         spin_lock(&sbi->ll_lock);
995         ll_ra_stats_inc_unlocked(ra, which);
996         spin_unlock(&sbi->ll_lock);
997 }
998
999 void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
1000 {
1001         if (!llap->llap_defer_uptodate || llap->llap_ra_used)
1002                 return;
1003
1004         ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
1005 }
1006
1007 #define RAS_CDEBUG(ras) \
1008         CDEBUG(D_READA,                                                      \
1009                "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
1010                ras->ras_last_readpage, ras->ras_consecutive_requests,        \
1011                ras->ras_consecutive_pages, ras->ras_window_start,            \
1012                ras->ras_window_len, ras->ras_next_readahead,                 \
1013                ras->ras_requests, ras->ras_request_index);
1014
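/* Return true if @index lies within [point - before, point + after],
 * clamping the window edges at 0 and ~0UL to guard against wrap-around. */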
1015 static int index_in_window(unsigned long index, unsigned long point,
1016                            unsigned long before, unsigned long after)
1017 {
1018         unsigned long start = point - before, end = point + after;
1019
1020         if (start > point)
1021                start = 0;
1022         if (end < point)
1023                end = ~0;
1024
1025         return start <= index && index <= end;
1026 }
1027
1028 static struct ll_readahead_state *ll_ras_get(struct file *f)
1029 {
1030         struct ll_file_data       *fd;
1031
1032         fd = LUSTRE_FPRIVATE(f);
1033         return &fd->fd_ras;
1034 }
1035
1036 void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
1037 {
1038         struct ll_readahead_state *ras;
1039
1040         ras = ll_ras_get(f);
1041
1042         spin_lock(&ras->ras_lock);
1043         ras->ras_requests++;
1044         ras->ras_request_index = 0;
1045         ras->ras_consecutive_requests++;
1046         rar->lrr_reader = current;
1047
1048         list_add(&rar->lrr_linkage, &ras->ras_read_beads);
1049         spin_unlock(&ras->ras_lock);
1050 }
1051
1052 void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
1053 {
1054         struct ll_readahead_state *ras;
1055
1056         ras = ll_ras_get(f);
1057
1058         spin_lock(&ras->ras_lock);
1059         list_del_init(&rar->lrr_linkage);
1060         spin_unlock(&ras->ras_lock);
1061 }
1062
1063 static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
1064 {
1065         struct ll_ra_read *scan;
1066
1067         list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
1068                 if (scan->lrr_reader == current)
1069                         return scan;
1070         }
1071         return NULL;
1072 }
1073
1074 struct ll_ra_read *ll_ra_read_get(struct file *f)
1075 {
1076         struct ll_readahead_state *ras;
1077         struct ll_ra_read         *bead;
1078
1079         ras = ll_ras_get(f);
1080
1081         spin_lock(&ras->ras_lock);
1082         bead = ll_ra_read_get_locked(ras);
1083         spin_unlock(&ras->ras_lock);
1084         return bead;
1085 }
1086
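/* Issue read-ahead within the window maintained by ras_update(): reserve a
 * slice of the per-fs read-ahead budget, grab each candidate page, skip pages
 * that are already up to date or not covered by a DLM lock, and queue the
 * rest as deferred-uptodate reads on @oig.  Returns the number of pages
 * queued; unused budget is returned to the pool. */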
1087 static int ll_readahead(struct ll_readahead_state *ras,
1088                          struct obd_export *exp, struct address_space *mapping,
1089                          struct obd_io_group *oig, int flags)
1090 {
1091         unsigned long i, start = 0, end = 0, reserved;
1092         struct ll_async_page *llap;
1093         struct page *page;
1094         int rc, ret = 0, match_failed = 0;
1095         __u64 kms;
1096         unsigned int gfp_mask;
1097         struct inode *inode;
1098         struct lov_stripe_md *lsm;
1099         struct ll_ra_read *bead;
1100         struct ost_lvb lvb;
1101         ENTRY;
1102
1103         inode = mapping->host;
1104         lsm = ll_i2info(inode)->lli_smd;
1105
1106         lov_stripe_lock(lsm);
1107         inode_init_lvb(inode, &lvb);
1108         obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
1109         kms = lvb.lvb_size;
1110         lov_stripe_unlock(lsm);
1111         if (kms == 0) {
1112                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
1113                 RETURN(0);
1114         }
1115
1116         spin_lock(&ras->ras_lock);
1117         bead = ll_ra_read_get_locked(ras);
1118         /* Enlarge the RA window to encompass the full read */
1119         if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
1120             bead->lrr_start + bead->lrr_count) {
1121                 ras->ras_window_len = bead->lrr_start + bead->lrr_count -
1122                                       ras->ras_window_start;
1123         }
1124         /* Reserve a part of the read-ahead window that we'll be issuing */
1125         if (ras->ras_window_len) {
1126                 start = ras->ras_next_readahead;
1127                 end = ras->ras_window_start + ras->ras_window_len - 1;
1128         }
1129         if (end != 0) {
1130                 /* Truncate RA window to end of file */
1131                 end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
1132                 ras->ras_next_readahead = max(end, end + 1);
1133                 RAS_CDEBUG(ras);
1134         }
1135         spin_unlock(&ras->ras_lock);
1136
1137         if (end == 0) {
1138                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
1139                 RETURN(0);
1140         }
1141
1142         reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
1143         if (reserved < end - start + 1)
1144                 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
1145
1146         gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
1147 #ifdef __GFP_NOWARN
1148         gfp_mask |= __GFP_NOWARN;
1149 #endif
1150
1151         for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
1152                 /* skip locked pages from previous readpage calls */
1153                 page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
1154                 if (page == NULL) {
1155                         ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
1156                         CDEBUG(D_READA, "g_c_p_n failed\n");
1157                         continue;
1158                 }
1159
1160                 /* Check if page was truncated or reclaimed */
1161                 if (page->mapping != mapping) {
1162                         ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
1163                         CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
1164                         goto next_page;
1165                 }
1166
1167                 /* we do this first so that we can see the page in the /proc
1168                  * accounting */
1169                 llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
1170                 if (IS_ERR(llap) || llap->llap_defer_uptodate)
1171                         goto next_page;
1172
1173                 /* skip completed pages */
1174                 if (Page_Uptodate(page))
1175                         goto next_page;
1176
1177                 /* bail when we hit the end of the lock. */
1178                 if ((rc = ll_page_matches(page, flags|LL_FILE_READAHEAD)) <= 0){
1179                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
1180                                        "lock match failed: rc %d\n", rc);
1181                         ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
1182                         match_failed = 1;
1183                         goto next_page;
1184                 }
1185
1186                 rc = ll_issue_page_read(exp, llap, oig, 1);
1187                 if (rc == 0) {
1188                         reserved--;
1189                         ret++;
1190                         LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
1191                                        "started read-ahead\n");
1192                 } else {
1193         next_page:
1194                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
1195                                        "skipping read-ahead\n");
1196
1197                         unlock_page(page);
1198                 }
1199                 page_cache_release(page);
1200         }
1201
1202         LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
1203         if (reserved != 0)
1204                 ll_ra_count_put(ll_i2sbi(inode), reserved);
1205         if (i == end + 1 && end == (kms >> CFS_PAGE_SHIFT))
1206                 ll_ra_stats_inc(mapping, RA_STAT_EOF);
1207
1208         /* if we didn't get to the end of the region we reserved from
1209          * the ras we need to go back and update the ras so that the
1210          * next read-ahead tries from where we left off.  we only do so
1211          * if the region we failed to issue read-ahead on is still ahead
1212          * of the app and behind the next index to start read-ahead from */
1213         if (i != end + 1) {
1214                 spin_lock(&ras->ras_lock);
1215                 if (i < ras->ras_next_readahead &&
1216                     index_in_window(i, ras->ras_window_start, 0,
1217                                     ras->ras_window_len)) {
1218                         ras->ras_next_readahead = i;
1219                         RAS_CDEBUG(ras);
1220                 }
1221                 spin_unlock(&ras->ras_lock);
1222         }
1223
1224         RETURN(ret);
1225 }
1226
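/* Align the read-ahead window start down to a 1MB boundary: assuming
 * 4096-byte pages, 1024 * 1024 >> CFS_PAGE_SHIFT == 256, so this is
 * index & ~255UL. */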
1227 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
1228 {
1229         ras->ras_window_start = index & (~((1024 * 1024 >> CFS_PAGE_SHIFT) - 1));
1230 }
1231
1232 /* called with the ras_lock held or from places where it doesn't matter */
1233 static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
1234 {
1235         ras->ras_last_readpage = index;
1236         ras->ras_consecutive_requests = 0;
1237         ras->ras_consecutive_pages = 0;
1238         ras->ras_window_len = 0;
1239         ras_set_start(ras, index);
1240         ras->ras_next_readahead = max(ras->ras_window_start, index);
1241
1242         RAS_CDEBUG(ras);
1243 }
1244
1245 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
1246 {
1247         spin_lock_init(&ras->ras_lock);
1248         ras_reset(ras, 0);
1249         ras->ras_requests = 0;
1250         INIT_LIST_HEAD(&ras->ras_read_beads);
1251 }
1252
1253 static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1254                        struct ll_readahead_state *ras, unsigned long index,
1255                        unsigned hit)
1256 {
1257         struct ll_ra_info *ra = &sbi->ll_ra_info;
1258         int zero = 0;
1259         ENTRY;
1260
1261         spin_lock(&sbi->ll_lock);
1262         spin_lock(&ras->ras_lock);
1263
1264         ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
1265
1266         /* reset the read-ahead window in two cases.  First, when the app seeks
1267          * or reads to some other part of the file.  Second, if we get a
1268          * read-ahead miss on a page we think we've previously issued.  This can
1269          * be a symptom of there being so many read-ahead pages that the VM is
1270          * reclaiming them before we get to them. */
1271         if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
1272                 zero = 1;
1273                 ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
1274         } else if (!hit && ras->ras_window_len &&
1275                    index < ras->ras_next_readahead &&
1276                    index_in_window(index, ras->ras_window_start, 0,
1277                                    ras->ras_window_len)) {
1278                 zero = 1;
1279                 ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
1280         }
1281
1282         /* On the second access to a file smaller than the tunable
1283          * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
1284          * file, up to ra_max_pages.  This is simply a best effort and
1285          * only occurs once per open file.  Normal RA behavior resumes
1286          * for subsequent IO.  The mmap case does not increment
1287          * ras_requests and thus can never trigger this behavior. */
1288         if (ras->ras_requests == 2 && !ras->ras_request_index) {
1289                 __u64 kms_pages;
1290
1291                 kms_pages = (inode->i_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
1292
1293                 CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
1294                        ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);
1295
1296                 if (kms_pages &&
1297                     kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1298                         ras->ras_window_start = 0;
1299                         ras->ras_last_readpage = 0;
1300                         ras->ras_next_readahead = 0;
1301                         ras->ras_window_len = min(ra->ra_max_pages,
1302                                 ra->ra_max_read_ahead_whole_pages);
1303                         GOTO(out_unlock, 0);
1304                 }
1305         }
1306
1307         if (zero) {
1308                 ras_reset(ras, index);
1309                 GOTO(out_unlock, 0);
1310         }
1311
1312         ras->ras_last_readpage = index;
1313         ras->ras_consecutive_pages++;
1314         ras_set_start(ras, index);
1315         ras->ras_next_readahead = max(ras->ras_window_start,
1316                                       ras->ras_next_readahead);
1317
1318         /* Trigger RA in the mmap case where ras_consecutive_requests
1319          * is not incremented and thus can't be used to trigger RA */
1320         if (!ras->ras_window_len && ras->ras_consecutive_pages == 3) {
1321                 ras->ras_window_len = 1024 * 1024 >> CFS_PAGE_SHIFT;
1322                 GOTO(out_unlock, 0);
1323         }
1324
1325         /* The initial ras_window_len is set to the request size.  To avoid
1326          * uselessly reading and discarding pages for random IO the window is
1327          * only increased once per consecutive request received. */
1328         if (ras->ras_consecutive_requests > 1 && !ras->ras_request_index) {
1329                 ras->ras_window_len = min(ras->ras_window_len +
1330                                           (1024 * 1024 >> CFS_PAGE_SHIFT),
1331                                           ra->ra_max_pages);
1332         }
1333
1334         EXIT;
1335 out_unlock:
1336         RAS_CDEBUG(ras);
1337         ras->ras_request_index++;
1338         spin_unlock(&ras->ras_lock);
1339         spin_unlock(&sbi->ll_lock);
1340         return;
1341 }
1342
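/* ->writepage for llite.  The page arrives locked with the dirty bit already
 * cleared by the caller; if an async write was queued earlier just mark it
 * urgent and ready, otherwise queue or synchronously write it now via
 * queue_or_sync_write(). */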
1343 int ll_writepage(struct page *page)
1344 {
1345         struct inode *inode = page->mapping->host;
1346         struct ll_inode_info *lli = ll_i2info(inode);
1347         struct obd_export *exp;
1348         struct ll_async_page *llap;
1349         int rc = 0;
1350         ENTRY;
1351
1352         LASSERT(!PageDirty(page));
1353         LASSERT(PageLocked(page));
1354
1355         exp = ll_i2obdexp(inode);
1356         if (exp == NULL)
1357                 GOTO(out, rc = -EINVAL);
1358
1359         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
1360         if (IS_ERR(llap))
1361                 GOTO(out, rc = PTR_ERR(llap));
1362
1363         LASSERT(!PageWriteback(page));
1364         set_page_writeback(page);
1365
1366         page_cache_get(page);
1367         if (llap->llap_write_queued) {
1368                 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
1369                 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
1370                                          llap->llap_cookie,
1371                                          ASYNC_READY | ASYNC_URGENT);
1372         } else {
1373                 rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
1374                                          ASYNC_READY | ASYNC_URGENT);
1375         }
1376         if (rc)
1377                 page_cache_release(page);
1378 out:
1379         if (rc) {
1380                 if (!lli->lli_async_rc)
1381                         lli->lli_async_rc = rc;
1382                 /* re-dirty page on error so it retries write */
1383                 if (PageWriteback(page)) {
1384                         end_page_writeback(page);
1385                 }
1386                 /* resend the page only if its IO has not started */
1387                 if (!PageError(page))
1388                         ll_redirty_page(page);
1389                 unlock_page(page);
1390         }
1391         RETURN(rc);
1392 }
1393
1394 /*
1395  * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
1396  * read-ahead assumes it is valid to issue readpage all the way up to
1397  * i_size, but our dlm locks make that not the case.  We disable the
1398  * kernel's read-ahead and do our own by walking ahead in the page cache
1399  * checking for dlm lock coverage.  The main difference between 2.4 and
1400  * 2.6 is how read-ahead gets batched and issued, but we're using our own,
1401  * so they look the same.
1402  */
1403 int ll_readpage(struct file *filp, struct page *page)
1404 {
1405         struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
1406         struct inode *inode = page->mapping->host;
1407         struct obd_export *exp;
1408         struct ll_async_page *llap;
1409         struct obd_io_group *oig = NULL;
1410         int rc;
1411         ENTRY;
1412
1413         LASSERT(PageLocked(page));
1414         LASSERT(!PageUptodate(page));
1415         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
1416                inode->i_ino, inode->i_generation, inode,
1417                (((loff_t)page->index) << CFS_PAGE_SHIFT),
1418                (((loff_t)page->index) << CFS_PAGE_SHIFT));
1419         LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
1420
1421         if (!ll_i2info(inode)->lli_smd) {
1422                 /* File with no objects - one big hole */
1423                 /* We use this just because remove_from_page_cache is not
1424                  * exported; below we make the page up to date again. */
1425                 ll_truncate_complete_page(page);
1426                 clear_page(kmap(page));
1427                 kunmap(page);
1428                 SetPageUptodate(page);
1429                 unlock_page(page);
1430                 RETURN(0);
1431         }
1432
1433         rc = oig_init(&oig);
1434         if (rc < 0)
1435                 GOTO(out, rc);
1436
1437         exp = ll_i2obdexp(inode);
1438         if (exp == NULL)
1439                 GOTO(out, rc = -EINVAL);
1440
1441         llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
1442         if (IS_ERR(llap))
1443                 GOTO(out, rc = PTR_ERR(llap));
1444
1445         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1446                 ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
1447                            llap->llap_defer_uptodate);
1448
1449
1450         if (llap->llap_defer_uptodate) {
1451                 /* This is the callpath if we got the page from a readahead */
1452                 llap->llap_ra_used = 1;
1453                 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1454                                   fd->fd_flags);
1455                 if (rc > 0)
1456                         obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
1457                                              NULL, oig);
1458                 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
1459                 SetPageUptodate(page);
1460                 unlock_page(page);
1461                 GOTO(out_oig, rc = 0);
1462         }
1463
1464         if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
1465                 rc = ll_page_matches(page, fd->fd_flags);
1466                 if (rc < 0) {
1467                         LL_CDEBUG_PAGE(D_ERROR, page,
1468                                        "lock match failed: rc %d\n", rc);
1469                         GOTO(out, rc);
1470                 }
1471
1472                 if (rc == 0) {
1473                         CWARN("ino %lu page %lu (%llu) not covered by "
1474                               "a lock (mmap?).  check debug logs.\n",
1475                               inode->i_ino, page->index,
1476                               (long long)page->index << CFS_PAGE_SHIFT);
1477                 }
1478         }
1479
1480         rc = ll_issue_page_read(exp, llap, oig, 0);
1481         if (rc)
1482                 GOTO(out, rc);
1483
1484         LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
1485         /* We have just requested the actual page we want, see if we can tack
1486          * on some readahead to that page's RPC before it is sent. */
1487         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1488                 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1489                              fd->fd_flags);
1490
1491         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1492
1493 out:
1494         if (rc)
1495                 unlock_page(page);
1496 out_oig:
1497         if (oig != NULL)
1498                 oig_release(oig);
1499         RETURN(rc);
1500 }