lustre/llite/rw.c (lustre-release, commit 8af953fee721e74234dac6d926e3e0192593cb05)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev )
#endif

cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pg;
        int opc, rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;

        if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
                pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
        else
                pg.count = CFS_PAGE_SIZE;
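
        /* For illustration, assuming CFS_PAGE_SIZE is 4096: a write with
         * i_size_read(inode) == 10000 makes the page at index 2 span
         * [8192, 12288), which extends past EOF, so pg.count becomes
         * 10000 % 4096 == 1808; earlier, fully-covered pages keep 4096. */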

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, i_size_read(inode),
                       page->mapping->host, i_size_read(page->mapping->host),
                       page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
                                   pg.count);
        else
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
                                   pg.count);
        oinfo.oi_oa = oa;
        oinfo.oi_md = lsm;
        /* NB partial write, so we might not have CAPA_OPC_OSS_READ capa */
        opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
        oinfo.oi_capa = ll_osscapa_get(inode, opc);
        rc = obd_brw(cmd, ll_i2dtexp(inode), &oinfo, 1, &pg, NULL);
        capa_put(oinfo.oi_capa);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}

/* this isn't where truncate starts.   roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
 * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
 * avoid races.
 *
 * must be called under ->lli_size_sem */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_info oinfo = { { { 0 } } };
        struct ost_lvb lvb;
        struct obdo oa;
        int rc;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
               inode->i_generation, inode, i_size_read(inode),
               i_size_read(inode));

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lli->lli_smd) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        /* XXX I'm pretty sure this is a hack to paper over a more fundamental
         * race condition. */
        lov_stripe_lock(lli->lli_smd);
        inode_init_lvb(inode, &lvb);
        rc = obd_merge_lvb(ll_i2dtexp(inode), lli->lli_smd, &lvb, 0);
        if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
                CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                       lli->lli_smd->lsm_object_id, i_size_read(inode),
                       i_size_read(inode));
                lov_stripe_unlock(lli->lli_smd);
                GOTO(out_unlock, 0);
        }

        obd_adjust_kms(ll_i2dtexp(inode), lli->lli_smd, i_size_read(inode), 1);
        lov_stripe_unlock(lli->lli_smd);

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
                     (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves behind a partial page, update its
                 * checksum. */
                struct page *page = find_get_page(inode->i_mapping,
                                                  i_size_read(inode) >>
                                                  CFS_PAGE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                char *kaddr = kmap_atomic(page, KM_USER0);
                                llap->llap_checksum =
                                        crc32_le(0, kaddr, CFS_PAGE_SIZE);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        page_cache_release(page);
                }
        }

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lli->lli_smd->lsm_object_id, i_size_read(inode), i_size_read(inode));

        oinfo.oi_md = lli->lli_smd;
        oinfo.oi_policy.l_extent.start = i_size_read(inode);
        oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
        oinfo.oi_oa = &oa;
        oa.o_id = lli->lli_smd->lsm_object_id;
        oa.o_gr = lli->lli_smd->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLFID | OBD_MD_FLGENER);

        ll_inode_size_unlock(inode, 0);

        oinfo.oi_capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
        rc = obd_punch_rqset(ll_i2dtexp(inode), &oinfo, NULL);
        ll_truncate_free_capa(oinfo.oi_capa);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                              OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
} /* ll_truncate */

int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pga;
        struct obdo oa;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = CFS_PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                     OBD_MD_FLTYPE | OBD_MD_FLGROUP;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        oinfo.oi_oa = &oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oinfo, 1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ mark it
         * Uptodate until commit_write */
        if (from == 0 && to == CFS_PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The extent
         * locking will have updated the KMS, and for our purposes here we can
         * treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kaddr, 0, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n", cmd, page,
                 page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LASSERT(!PageWriteback(page));

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path, which takes the page lock and then the
         * cli lock */
        LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
                 page->mapping->host->i_ino, page->index);
        clear_page_dirty_for_io(page);

        /* This actually clears the dirty bit in the radix tree.*/
        set_page_writeback(page);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page requiring us
 *    to write more from the page cache.  (No further races are possible, since
 *    by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
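/* For illustration, assuming CFS_PAGE_SIZE is 4096: with a merged kms of
 * 10000, a queued page at index 1 is still written in full (4096 bytes), the
 * page at index 2 straddles kms and is trimmed to 10000 % 4096 == 1808 bytes,
 * and a page at index 3 or beyond lost a race with truncate and gets a count
 * of 0. */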
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
                return kms % CFS_PAGE_SIZE;

        return CFS_PAGE_SIZE;
}

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_ioepoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLUID | OBD_MD_FLGID |
                        OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}

static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
                              obd_valid valid)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        obdo_from_inode(oa, llap->llap_page->mapping->host, valid);

        EXIT;
}

static struct obd_capa *ll_ap_lookup_capa(void *data, int cmd)
{
        struct ll_async_page *llap = LLAP_FROM_COOKIE(data);
        int opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;

        return ll_osscapa_get(llap->llap_page->mapping->host, opc);
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_update_obdo =       ll_ap_update_obdo,
        .ap_completion =        ll_ap_completion,
        .ap_lookup_capa =       ll_ap_lookup_capa,
};

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached to every page in lustre, linked off @sbi.
 * We add an llap to the list so we don't lose our place during list walking.
 * If llaps in the list are being moved they will only move to the end
 * of the LRU, and we aren't terribly interested in those pages here (we
 * start at the beginning of the list where the least-used llaps are).
 */
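/* A sketch of the walk below: dummy_llap acts as a cursor.  It is re-inserted
 * just after each llap we examine, so ll_lock can be dropped (for
 * cond_resched() or page teardown) and the scan can pick up again from the
 * cursor without losing its place in the LRU. */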
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
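        /* For illustration, assuming 4KB pages (CFS_PAGE_SHIFT == 12), the
         * cap below works out to 40 << 8 == 10240 pages, i.e. 40MB of page
         * data per call, no matter how large "want" comes out. */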
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - CFS_PAGE_SHIFT))
                want = 40 << (20 - CFS_PAGE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        while (--total >= 0 && count < want) {
                struct page *page;
                int keep;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                keep = (llap->llap_write_queued || PageDirty(page) ||
                        PageWriteback(page) || (!PageUptodate(page) &&
                        llap->llap_origin != LLAP_ORIGIN_READAHEAD));

                LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               PageWriteback(page) ? "wb" : "",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                         (__u64)page->index << CFS_PAGE_SHIFT,
                                         ((__u64)page->index << CFS_PAGE_SHIFT)|
                                          ~CFS_PAGE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                ++count;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
                                                             " because it is "
                                                             "%s\n",
                                                              PageDirty(page)?
                                                              "dirty":"mapped");
                        }
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}

struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (!inode) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "page received\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }
        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2dtexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << CFS_PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;
        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        INIT_LIST_HEAD(&llap->llap_pending_write);
        spin_unlock(&sbi->ll_lock);

 out:
        if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
                __u32 csum = 0;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

        llap->llap_origin = origin;
        RETURN(llap);
}

static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;
        /* Do not pass llap here, as this is a sync write. */
        llap_write_pending(inode, NULL);

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
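        /* For illustration, assuming CFS_PAGE_SIZE is 4096 and i_size is
         * 10000 (so size_index == 2): a 100-byte write to page index 1 is
         * widened to a full 4096-byte request, while one to page index 2 is
         * widened only up to EOF, i.e. to 10000 & ~CFS_PAGE_MASK == 1808. */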
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum = 0;
                struct page *page = llap->llap_page;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page)) {
                        end_page_writeback(llap->llap_page);
                }
        }

        if (rc == 0 && llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

free_oig:
        oig_release(oig);
out:
        RETURN(rc);
}

/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /*
         * queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back. Lustre wants write-back
         * to be started earlier for the following reasons:
         *
         *     (1) with a large number of clients we need to limit the amount
         *     of cached data on the clients a lot;
         *
         *     (2) large compute jobs generally want compute-only then io-only
         *     and the IO should complete as quickly as possible;
         *
         *     (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         *
         */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set the page dirty when it's queued to be written out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

out:
        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* this page is beyond the pale of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking. we must
                 * tear down our book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

/* called for each page in a completed rpc. */
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        int ret = 0;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page,cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0)  {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                }
                SetPageError(page);
                if (rc == -ENOSPC)
                        set_bit(AS_ENOSPC, &page->mapping->flags);
                else
                        set_bit(AS_EIO, &page->mapping->flags);
        }

        unlock_page(page);

        if (cmd & OBD_BRW_WRITE) {
                /* Only when rc == 0 (the write succeeded) can this page be
                 * removed from the pending_writing list.
                 */
                if (rc == 0 && llap_write_complete(page->mapping->host, llap))
                        ll_queue_done_writing(page->mapping->host, 0);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        RETURN(ret);
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        if (llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback..*/
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}

static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (!(fd_flags & LL_FILE_READAHEAD))
                flags |= LDLM_FL_CBPENDING;
        matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
                                                  ASYNC_READY | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
               ras->ras_consecutive_pages, ras->ras_window_start,            \
               ras->ras_window_len, ras->ras_next_readahead,                 \
               ras->ras_requests, ras->ras_request_index);

static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
               start = 0;
        if (end < point)
               end = ~0;

        return start <= index && index <= end;
}

static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data       *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}

void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        ras->ras_requests++;
        ras->ras_request_index = 0;
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;

        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}

static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}

struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read         *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);
        return bead;
}

static int ll_readahead(struct ll_readahead_state *ras,
                         struct obd_export *exp, struct address_space *mapping,
                         struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        unsigned int gfp_mask;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* Enlarge the RA window to encompass the full read */
        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
            bead->lrr_start + bead->lrr_count) {
                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
                                      ras->ras_window_start;
        }
        /* Reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (end != 0) {
                /* Truncate RA window to end of file */
                end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
                if (page == NULL) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* Check if page was truncated or reclaimed */
                if (page->mapping != mapping) {
                        ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                        goto next_page;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags|LL_FILE_READAHEAD)) <= 0){
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
                                       "started read-ahead\n");
                } else {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
        if (i == end + 1 && end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}

static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
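        /* For illustration, assuming 4KB pages: 1MB >> CFS_PAGE_SHIFT is 256,
         * so the window start is rounded down to a 256-page (1MB) boundary,
         * e.g. index 300 yields a window start of 256. */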
        ras->ras_window_start = index & (~((1024 * 1024 >> CFS_PAGE_SHIFT) - 1));
}

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive_requests = 0;
        ras->ras_consecutive_pages = 0;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start, index);

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        ras->ras_requests = 0;
        INIT_LIST_HEAD(&ras->ras_read_beads);
}

static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                       struct ll_readahead_state *ras, unsigned long index,
                       unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM is
         * reclaiming it before we get to it. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        /* On the second access to a file smaller than the tunable
         * ra_max_read_ahead_whole_pages trigger RA on all pages in the
         * file up to ra_max_pages.  This is simply a best effort and
         * only occurs once per open file.  Normal RA behavior is reverted
         * to for subsequent IO.  The mmap case does not increment
         * ras_requests and thus can never trigger this behavior. */
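        /* For illustration, assuming 4KB pages: on the second read request
         * against a 1MB file, kms_pages == 256; if that is no more than
         * ra_max_read_ahead_whole_pages, the window below is widened to cover
         * the whole file (still capped at ra_max_pages). */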
1341         if (ras->ras_requests == 2 && !ras->ras_request_index) {
1342                 __u64 kms_pages;
1343
1344                 kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
1345                             CFS_PAGE_SHIFT;
1346
1347                 CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
1348                        ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);
1349
1350                 if (kms_pages &&
1351                     kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1352                         ras->ras_window_start = 0;
1353                         ras->ras_last_readpage = 0;
1354                         ras->ras_next_readahead = 0;
1355                         ras->ras_window_len = min(ra->ra_max_pages,
1356                                 ra->ra_max_read_ahead_whole_pages);
1357                         GOTO(out_unlock, 0);
1358                 }
1359         }
1360
1361         if (zero) {
1362                 ras_reset(ras, index);
1363                 GOTO(out_unlock, 0);
1364         }
1365
1366         ras->ras_last_readpage = index;
1367         ras->ras_consecutive_pages++;
1368         ras_set_start(ras, index);
1369         ras->ras_next_readahead = max(ras->ras_window_start,
1370                                       ras->ras_next_readahead);
1371
1372         /* Trigger RA in the mmap case where ras_consecutive_requests
1373          * is not incremented and thus can't be used to trigger RA */
1374         if (!ras->ras_window_len && ras->ras_consecutive_pages == 3) {
1375                 ras->ras_window_len = 1024 * 1024 >> CFS_PAGE_SHIFT;
1376                 GOTO(out_unlock, 0);
1377         }
1378
1379         /* The initial ras_window_len is set to the request size.  To avoid
1380          * uselessly reading and discarding pages for random IO, the window
1381          * is only grown once per consecutive request received. */
1382         if (ras->ras_consecutive_requests > 1 && !ras->ras_request_index) {
1383                 ras->ras_window_len = min(ras->ras_window_len +
1384                                           (1024 * 1024 >> CFS_PAGE_SHIFT),
1385                                           ra->ra_max_pages);
1386         }
1387
1388         EXIT;
1389 out_unlock:
1390         RAS_CDEBUG(ras);
1391         ras->ras_request_index++;
1392         spin_unlock(&ras->ras_lock);
1393         spin_unlock(&sbi->ll_lock);
1394         return;
1395 }
1396
1397 int ll_writepage(struct page *page)
1398 {
1399         struct inode *inode = page->mapping->host;
1400         struct ll_inode_info *lli = ll_i2info(inode);
1401         struct obd_export *exp;
1402         struct ll_async_page *llap;
1403         int rc = 0;
1404         ENTRY;
1405
1406         LASSERT(!PageDirty(page));
1407         LASSERT(PageLocked(page));
1408
1409         exp = ll_i2dtexp(inode);
1410         if (exp == NULL)
1411                 GOTO(out, rc = -EINVAL);
1412
1413         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
1414         if (IS_ERR(llap))
1415                 GOTO(out, rc = PTR_ERR(llap));
1416
1417         LASSERT(!PageWriteback(page));
1418         set_page_writeback(page);
1419
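        /* take an extra reference on the page for the duration of the async
         * write; dropped below on error (and, we assume, released by the IO
         * completion path on success) */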
1420         page_cache_get(page);
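        /* if the page is already queued for async IO just flag it ready and
         * urgent; otherwise queue it (or write it out synchronously) now */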
1421         if (llap->llap_write_queued) {
1422                 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
1423                 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
1424                                          llap->llap_cookie,
1425                                          ASYNC_READY | ASYNC_URGENT);
1426         } else {
1427                 rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
1428                                          ASYNC_READY | ASYNC_URGENT);
1429         }
1430         if (rc)
1431                 page_cache_release(page);
1432 out:
1433         if (rc) {
1434                 if (!lli->lli_async_rc)
1435                         lli->lli_async_rc = rc;
1436                 /* on error, end any writeback we started above */
1437                 if (PageWriteback(page)) {
1438                         end_page_writeback(page);
1439                 }
1440                 /* re-dirty the page for retry only if its IO never started */
1441                 if (!PageError(page))
1442                         ll_redirty_page(page);
1443                 unlock_page(page);
1444         }
1445         RETURN(rc);
1446 }
1447
1448 /*
1449  * For now we do our readpage the same on both 2.4 and 2.6.  The kernel's
1450  * read-ahead assumes it is valid to issue readpage all the way up to
1451  * i_size, but our dlm locks make that not the case.  We disable the
1452  * kernel's read-ahead and do our own by walking ahead in the page cache,
1453  * checking for dlm lock coverage.  The main difference between 2.4 and
1454  * 2.6 is how read-ahead gets batched and issued, but we're using our own,
1455  * so they look the same.
1456  */
1457 int ll_readpage(struct file *filp, struct page *page)
1458 {
1459         struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
1460         struct inode *inode = page->mapping->host;
1461         struct obd_export *exp;
1462         struct ll_async_page *llap;
1463         struct obd_io_group *oig = NULL;
1464         int rc;
1465         ENTRY;
1466
1467         LASSERT(PageLocked(page));
1468         LASSERT(!PageUptodate(page));
1469         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
1470                inode->i_ino, inode->i_generation, inode,
1471                (((loff_t)page->index) << CFS_PAGE_SHIFT),
1472                (((loff_t)page->index) << CFS_PAGE_SHIFT));
1473         LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
1474
1475         if (!ll_i2info(inode)->lli_smd) {
1476                 /* File with no objects - one big hole */
1477                 /* We only use this because remove_from_page_cache() is not
1478                  * exported; the page is brought back up to date below. */
1479                 ll_truncate_complete_page(page);
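                /* zero-fill the page and mark it up to date: a read from a
                 * hole returns zeroes */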
1480                 clear_page(kmap(page));
1481                 kunmap(page);
1482                 SetPageUptodate(page);
1483                 unlock_page(page);
1484                 RETURN(0);
1485         }
1486
1487         rc = oig_init(&oig);
1488         if (rc < 0)
1489                 GOTO(out, rc);
1490
1491         exp = ll_i2dtexp(inode);
1492         if (exp == NULL)
1493                 GOTO(out, rc = -EINVAL);
1494
1495         llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
1496         if (IS_ERR(llap))
1497                 GOTO(out, rc = PTR_ERR(llap));
1498
1499         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1500                 ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
1501                            llap->llap_defer_uptodate);
1502
1504         if (llap->llap_defer_uptodate) {
1505                 /* This is the call path taken when the page came from read-ahead */
1506                 llap->llap_ra_used = 1;
1507                 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1508                                   fd->fd_flags);
1509                 if (rc > 0)
1510                         obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
1511                                              NULL, oig);
1512                 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
1513                 SetPageUptodate(page);
1514                 unlock_page(page);
1515                 GOTO(out_oig, rc = 0);
1516         }
1517
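        /* unless the file was opened with locking disabled, check that the
         * page is covered by one of our dlm extent locks before reading it */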
1518         if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
1519                 rc = ll_page_matches(page, fd->fd_flags);
1520                 if (rc < 0) {
1521                         LL_CDEBUG_PAGE(D_ERROR, page,
1522                                        "lock match failed: rc %d\n", rc);
1523                         GOTO(out, rc);
1524                 }
1525
1526                 if (rc == 0) {
1527                         CWARN("ino %lu page %lu (%llu) not covered by "
1528                               "a lock (mmap?).  check debug logs.\n",
1529                               inode->i_ino, page->index,
1530                               (long long)page->index << CFS_PAGE_SHIFT);
1531                 }
1532         }
1533
1534         rc = ll_issue_page_read(exp, llap, oig, 0);
1535         if (rc)
1536                 GOTO(out, rc);
1537
1538         LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
1539         /* We have just requested the actual page we want; see if we can
1540          * tack some read-ahead onto that page's RPC before it is sent. */
1541         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1542                 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1543                              fd->fd_flags);
1544
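        /* fire off the group: the requested page and any read-ahead pages
         * queued above go out in the same batch */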
1545         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1546
1547 out:
1548         if (rc)
1549                 unlock_page(page);
1550 out_oig:
1551         if (oig != NULL)
1552                 oig_release(oig);
1553         RETURN(rc);
1554 }