lustre/llite/rw.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <asm/segment.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev )
#endif

cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pg;
        int opc, rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;

        if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
                pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
        else
                pg.count = CFS_PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, i_size_read(inode),
                       page->mapping->host, i_size_read(page->mapping->host),
                       page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
                                   pg.count);
        else
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
                                   pg.count);
        oinfo.oi_oa = oa;
        oinfo.oi_md = lsm;
        /* NB partial write, so we might not have CAPA_OPC_OSS_READ capa */
        opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
        oinfo.oi_capa = ll_osscapa_get(inode, current->fsuid, opc);
        rc = obd_brw(cmd, ll_i2dtexp(inode), &oinfo, 1, &pg, NULL);
        capa_put(oinfo.oi_capa);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}

/* This isn't where truncate starts.  Roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.  setattr_raw grabs
 * the DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM
 * to avoid races.
 *
 * Must be called under ->lli_size_sem. */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_info oinfo = { { { 0 } } };
        struct ost_lvb lvb;
        struct obdo oa;
        int rc;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode, i_size_read(inode),
               i_size_read(inode));

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lli->lli_smd) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        /* XXX I'm pretty sure this is a hack to paper over a more fundamental
         * race condition. */
        lov_stripe_lock(lli->lli_smd);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lli->lli_smd, &lvb, 0);
        if (lvb.lvb_size == i_size_read(inode)) {
                CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                       lli->lli_smd->lsm_object_id, i_size_read(inode),
                       i_size_read(inode));
                lov_stripe_unlock(lli->lli_smd);
                GOTO(out_unlock, 0);
        }

        obd_adjust_kms(ll_i2dtexp(inode), lli->lli_smd, i_size_read(inode), 1);
        lov_stripe_unlock(lli->lli_smd);

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
                     (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves behind a partial page, update its
                 * checksum. */
                struct page *page = find_get_page(inode->i_mapping,
                                                  i_size_read(inode) >>
                                                  CFS_PAGE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                llap->llap_checksum =
                                        crc32_le(0, kmap(page), CFS_PAGE_SIZE);
                                kunmap(page);
                        }
                        page_cache_release(page);
                }
        }

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lli->lli_smd->lsm_object_id, i_size_read(inode),
               i_size_read(inode));

        oinfo.oi_md = lli->lli_smd;
        oinfo.oi_policy.l_extent.start = i_size_read(inode);
        oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
        oinfo.oi_oa = &oa;
        oa.o_id = lli->lli_smd->lsm_object_id;
        oa.o_gr = lli->lli_smd->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLFID | OBD_MD_FLGENER);

        ll_inode_size_unlock(inode, 0);

        oinfo.oi_capa = ll_osscapa_get(inode, 0, CAPA_OPC_OSS_TRUNC);
        rc = obd_punch_rqset(ll_i2dtexp(inode), &oinfo, NULL);
        ll_truncate_free_capa(oinfo.oi_capa);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                              OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
} /* ll_truncate */

int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pga;
        struct obdo oa;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = CFS_PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                     OBD_MD_FLTYPE | OBD_MD_FLGROUP;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        oinfo.oi_oa = &oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oinfo, 1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ mark it
         * uptodate until commit_write */
        if (from == 0 && to == CFS_PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kmap(page), 0, CFS_PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}
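
/*
 * For reference (a sketch, not part of this file's logic): in the 2.6
 * generic buffered-write path, prepare_write/commit_write are driven
 * roughly like this for each page of a write(2):
 *
 *         page = grab_cache_page(mapping, index);
 *         rc = aops->prepare_write(file, page, from, to);
 *         copy_from_user(page_address(page) + from, buf, to - from);
 *         rc = aops->commit_write(file, page, from, to);
 *
 * which is why ll_prepare_write() only needs to read in the parts of a
 * partially overwritten page that lie outside [from, to).
 */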

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LASSERT(!PageWriteback(page));

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        clear_page_dirty(page);
#else
        LASSERTF(!PageWriteback(page), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);
        clear_page_dirty_for_io(page);

        /* This actually clears the dirty bit in the radix tree. */
        set_page_writeback(page);
#endif

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.  (No further races are possible,
 *    since by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
                return kms % CFS_PAGE_SIZE;

        return CFS_PAGE_SIZE;
}
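
/*
 * Worked example (assuming 4096-byte pages): with kms = 10000, a page at
 * index 2 covers file bytes [8192, 12288); 8192 < 10000 < 12288, so the
 * sub-page case above returns 10000 % 4096 = 1808 bytes.  A page at index 3
 * starts at 12288 >= kms and is not written at all.
 */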

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_ioepoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLUID | OBD_MD_FLGID |
                        OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}

static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
                              obd_valid valid)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        obdo_from_inode(oa, llap->llap_page->mapping->host, valid);

        EXIT;
}

static struct obd_capa *ll_ap_lookup_capa(void *data, int cmd)
{
        struct ll_async_page *llap = LLAP_FROM_COOKIE(data);
        int opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;

        return ll_osscapa_get(llap->llap_page->mapping->host, llap->llap_fsuid,
                              opc);
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_update_obdo =       ll_ap_update_obdo,
        .ap_completion =        ll_ap_completion,
        .ap_lookup_capa =       ll_ap_lookup_capa,
};

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached to every page in lustre, linked off @sbi.
 * We add a dummy llap to the list so we don't lose our place during list
 * walking.  If llaps in the list are being moved they will only move to
 * the end of the LRU, and we aren't terribly interested in those pages
 * here (we start at the beginning of the list where the least-used llaps
 * are).
 */
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - CFS_PAGE_SHIFT))
                want = 40 << (20 - CFS_PAGE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        /* note that total is unsigned, so a "--total >= 0" condition would
         * always be true; test before decrementing instead */
        while (total > 0 && count < want) {
                struct page *page;
                int keep;

                total--;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,
                                              &dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                if (llap->llap_write_queued || PageDirty(page) ||
                    (!PageUptodate(page) &&
                     llap->llap_origin != LLAP_ORIGIN_READAHEAD))
                        keep = 1;
                else
                        keep = 0;

                LL_CDEBUG_PAGE(D_PAGE, page, "%s LRU page: %s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                         (__u64)page->index << CFS_PAGE_SHIFT,
                                         ((__u64)page->index << CFS_PAGE_SHIFT)|
                                          ~CFS_PAGE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                ++count;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
                                                             " because it is "
                                                             "%s\n",
                                                              PageDirty(page)?
                                                              "dirty":"mapped");
                        }
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}
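
/*
 * The dummy llap above is a cursor trick.  A sketch of one iteration:
 *
 *      ll_pglist:  [dummy] A B C        after walking one step and
 *      ll_pglist:  A [dummy] B C        re-anchoring behind A
 *
 * Because the dummy stays linked into the list, ll_lock can be dropped
 * (for cond_resched or page teardown) without losing our position.
 */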

struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (!inode) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "page received\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }
        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2dtexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << CFS_PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;
        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        INIT_LIST_HEAD(&llap->llap_pending_write);
        spin_unlock(&sbi->ll_lock);

 out:
        if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
                __u32 csum = 0;
                csum = crc32_le(csum, kmap(page), CFS_PAGE_SIZE);
                kunmap(page);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

        llap->llap_origin = origin;
        RETURN(llap);
}

static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;
        /* Do not pass llap here; this is a sync write. */
        llap_write_pending(inode, NULL);

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum = 0;
                struct page *page = llap->llap_page;
                csum = crc32_le(csum, kmap(page), CFS_PAGE_SIZE);
                kunmap(page);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page)) {
                        end_page_writeback(llap->llap_page);
                }
        }

        if (rc == 0 && llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

free_oig:
        oig_release(oig);
out:
        RETURN(rc);
}

/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /*
         * queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back. Lustre wants write-back
         * to be started earlier for the following reasons:
         *
         *     (1) with a large number of clients we need to limit the amount
         *     of cached data on the clients a lot;
         *
         *     (2) large compute jobs generally want compute-only then io-only
         *     and the IO should complete as quickly as possible;
         *
         *     (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         *
         */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* put the page in the page cache; from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set the page dirty when it's queued to be written out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

out:
        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* this page is beyond the pale of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking. we must
                 * teardown our book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

/* called for each page in a completed rpc. */
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        int ret = 0;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page, cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                } else {
                        ll_redirty_page(page);
                        ret = 1;
                }
                SetPageError(page);
        }

        unlock_page(page);

        if (cmd & OBD_BRW_WRITE) {
                /* Only if rc == 0 (the write succeeded) may this page be
                 * removed from the pending_write list. */
                if (rc == 0 && llap_write_complete(page->mapping->host, llap))
                        ll_queue_done_writing(page->mapping->host, 0);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        RETURN(ret);
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        if (llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback. */
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}

static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (!(fd_flags & LL_FILE_READAHEAD))
                flags |= LDLM_FL_CBPENDING;
        matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}
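
/*
 * For example (assuming 4096-byte pages): page index 3 produces the byte
 * extent [12288, 16383], and the match above only succeeds if an already
 * granted PR or PW extent lock covers that whole range.
 */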

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
                                                  ASYNC_READY | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
               ras->ras_consecutive_pages, ras->ras_window_start,            \
               ras->ras_window_len, ras->ras_next_readahead,                 \
               ras->ras_requests, ras->ras_request_index);

static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
}
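
/*
 * The two clamps above handle unsigned wrap-around.  For example, in
 * index_in_window(5, 3, 8, 8) the subtraction point - before underflows,
 * so start is clamped to 0; end = 11, and the call returns true since
 * 5 lies in [0, 11].
 */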

static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data       *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}

void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        ras->ras_requests++;
        ras->ras_request_index = 0;
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;

        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}

static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}

struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read         *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);
        return bead;
}

static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        unsigned int gfp_mask;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* Enlarge the RA window to encompass the full read */
        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
            bead->lrr_start + bead->lrr_count) {
                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
                                      ras->ras_window_start;
        }
        /* Reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (end != 0) {
                /* Truncate RA window to end of file */
                end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
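                /* note: end + 1 wraps to 0 when end == ~0UL; max() keeps
                 * ras_next_readahead from wrapping in that corner case */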
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
                if (page == NULL) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* Check if page was truncated or reclaimed */
                if (page->mapping != mapping) {
                        ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                        goto next_page;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags|LL_FILE_READAHEAD)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "started read-ahead\n");
                } else {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        /* reserved is unsigned, so asserting "reserved >= 0" here would be
         * vacuous; the loop condition above already keeps it from
         * underflowing */
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
        if (i == end + 1 && end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}
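
/*
 * Window-reservation example: with ras_window_start = 0, ras_window_len = 32
 * and ras_next_readahead = 10, the loop above tries pages [10, 31] and asks
 * ll_ra_count_get() for 22 pages.  If fewer are granted (the readahead
 * budget in ll_ra_info is shared filesystem-wide), fewer pages are issued.
 */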

static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~((1024 * 1024 >> CFS_PAGE_SHIFT) - 1));
}
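
/*
 * i.e. the window start is aligned down to a 1MB boundary: with 4096-byte
 * pages that is 256 pages, so index 300 gives ras_window_start = 256.
 */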

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive_requests = 0;
        ras->ras_consecutive_pages = 0;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start, index);

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        ras->ras_requests = 0;
        INIT_LIST_HEAD(&ras->ras_read_beads);
}

static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                       struct ll_readahead_state *ras, unsigned long index,
                       unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a miss
         * on a page we think we previously issued read-ahead for.  This can
         * be a symptom of there being so many read-ahead pages that the VM
         * reclaims them before we get to them. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        /* On the second access to a file smaller than the tunable
         * ra_max_read_ahead_whole_pages trigger RA on all pages in the
         * file up to ra_max_pages.  This is simply a best effort and
         * only occurs once per open file.  Normal RA behavior is restored
         * for subsequent IO.  The mmap case does not increment
         * ras_requests and thus can never trigger this behavior. */
        if (ras->ras_requests == 2 && !ras->ras_request_index) {
                __u64 kms_pages;

                kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
                            CFS_PAGE_SHIFT;

                CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
                       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);

                if (kms_pages &&
                    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
                        ras->ras_window_start = 0;
                        ras->ras_last_readpage = 0;
                        ras->ras_next_readahead = 0;
                        ras->ras_window_len = min(ra->ra_max_pages,
                                ra->ra_max_read_ahead_whole_pages);
                        GOTO(out_unlock, 0);
                }
        }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive_pages++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* Trigger RA in the mmap case where ras_consecutive_requests
         * is not incremented and thus can't be used to trigger RA */
        if (!ras->ras_window_len && ras->ras_consecutive_pages == 3) {
                ras->ras_window_len = 1024 * 1024 >> CFS_PAGE_SHIFT;
                GOTO(out_unlock, 0);
        }

        /* The initial ras_window_len is set to the request size.  To avoid
         * uselessly reading and discarding pages for random IO the window is
         * only increased once per consecutive request received. */
        if (ras->ras_consecutive_requests > 1 && !ras->ras_request_index) {
                ras->ras_window_len = min(ras->ras_window_len +
                                          (1024 * 1024 >> CFS_PAGE_SHIFT),
                                          ra->ra_max_pages);
        }

        EXIT;
out_unlock:
        RAS_CDEBUG(ras);
        ras->ras_request_index++;
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
        return;
}
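
/*
 * Window-growth example (4096-byte pages, so 1MB == 256 pages): a streaming
 * reader's window grows by 256 pages once per consecutive request, capped
 * at ra_max_pages, while any seek or in-window miss drops the state back
 * to ras_reset() and the window must be re-earned.
 */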

int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(!PageDirty(page));
        LASSERT(PageLocked(page));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        LASSERT(!PageWriteback(page));
        set_page_writeback(page);

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
                                         ASYNC_READY | ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
out:
        if (rc) {
                if (!lli->lli_async_rc)
                        lli->lli_async_rc = rc;
                /* re-dirty page on error so it retries write */
                if (PageWriteback(page)) {
                        end_page_writeback(page);
                }
                ll_redirty_page(page);
                unlock_page(page);
        }
        RETURN(rc);
}

/*
 * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode,
               (((loff_t)page->index) << CFS_PAGE_SHIFT),
               (((loff_t)page->index) << CFS_PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        if (!ll_i2info(inode)->lli_smd) {
                /* File with no objects - one big hole */
                /* We only use ll_truncate_complete_page() here because
                 * remove_from_page_cache() is not exported; the page is
                 * made uptodate again below. */
                ll_truncate_complete_page(page);
                clear_page(kmap(page));
                kunmap(page);
                SetPageUptodate(page);
                unlock_page(page);
                RETURN(0);
        }

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        llap->llap_fsuid = current->fsuid;

        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
                rc = ll_page_matches(page, fd->fd_flags);
                if (rc < 0) {
                        LL_CDEBUG_PAGE(D_ERROR, page,
                                       "lock match failed: rc %d\n", rc);
                        GOTO(out, rc);
                }

                if (rc == 0) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                              "a lock (mmap?).  check debug logs.\n",
                              inode->i_ino, page->index,
                              (long long)page->index << CFS_PAGE_SHIFT);
                }
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        RETURN(rc);
}