[fs/lustre-release.git] / lustre / llite / rw.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#ifdef HAVE_SEGMENT_H
# include <asm/segment.h>
#endif
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev)
#endif

cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pg;
        int opc, rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;

        if ((cmd & OBD_BRW_WRITE) &&
            (pg.off + CFS_PAGE_SIZE > i_size_read(inode)))
                pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
        else
                pg.count = CFS_PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, i_size_read(inode),
                       page->mapping->host, i_size_read(page->mapping->host),
                       page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
                                   pg.count);
        else
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
                                   pg.count);
        oinfo.oi_oa = oa;
        oinfo.oi_md = lsm;
        /* NB partial write, so we might not have CAPA_OPC_OSS_READ capa */
        opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
        oinfo.oi_capa = ll_osscapa_get(inode, opc);
        rc = obd_brw(cmd, ll_i2dtexp(inode), &oinfo, 1, &pg, NULL);
        capa_put(oinfo.oi_capa);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}
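
/*
 * Illustrative sketch, not part of the original source: the byte-count
 * decision made by ll_brw() above, isolated from the struct plumbing.
 * A write touching the page that holds EOF is clipped to i_size modulo
 * the page size; everything else transfers a full page.  PAGE_SZ stands
 * in for CFS_PAGE_SIZE here.
 */
#if 0   /* example only, never compiled */
static unsigned long long brw_page_count(int is_write, unsigned long long off,
                                         unsigned long long isize)
{
        const unsigned long long PAGE_SZ = 4096;

        if (is_write && off + PAGE_SZ > isize)
                return isize % PAGE_SZ;         /* partial last page */
        return PAGE_SZ;                         /* full page */
}
#endif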

/* This isn't where truncate starts.  Roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.  setattr_raw grabs
 * the DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM
 * to avoid races.
 *
 * Must be called under ->lli_size_sem. */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_info oinfo = { { { 0 } } };
        struct ost_lvb lvb;
        struct obdo oa;
        int rc;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n", inode->i_ino,
               inode->i_generation, inode, i_size_read(inode),
               i_size_read(inode));

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lli->lli_smd) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        /* XXX I'm pretty sure this is a hack to paper over a more fundamental
         * race condition. */
        lov_stripe_lock(lli->lli_smd);
        inode_init_lvb(inode, &lvb);
        rc = obd_merge_lvb(ll_i2dtexp(inode), lli->lli_smd, &lvb, 0);
        if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
                CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                       lli->lli_smd->lsm_object_id, i_size_read(inode),
                       i_size_read(inode));
                lov_stripe_unlock(lli->lli_smd);
                GOTO(out_unlock, 0);
        }

        obd_adjust_kms(ll_i2dtexp(inode), lli->lli_smd, i_size_read(inode), 1);
        lov_stripe_unlock(lli->lli_smd);

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
                     (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves behind a partial page, update its
                 * checksum. */
                struct page *page = find_get_page(inode->i_mapping,
                                                  i_size_read(inode) >>
                                                  CFS_PAGE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                char *kaddr = kmap_atomic(page, KM_USER0);
                                llap->llap_checksum =
                                        crc32_le(0, kaddr, CFS_PAGE_SIZE);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        page_cache_release(page);
                }
        }

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lli->lli_smd->lsm_object_id, i_size_read(inode),
               i_size_read(inode));

        oinfo.oi_md = lli->lli_smd;
        oinfo.oi_policy.l_extent.start = i_size_read(inode);
        oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
        oinfo.oi_oa = &oa;
        oa.o_id = lli->lli_smd->lsm_object_id;
        oa.o_gr = lli->lli_smd->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLFID | OBD_MD_FLGENER);

        ll_inode_size_unlock(inode, 0);

        oinfo.oi_capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
        rc = obd_punch_rqset(ll_i2dtexp(inode), &oinfo, NULL);
        ll_truncate_free_capa(oinfo.oi_capa);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                              OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
} /* ll_truncate */

int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pga;
        struct obdo oa;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = CFS_PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                     OBD_MD_FLTYPE | OBD_MD_FLGROUP;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        oinfo.oi_oa = &oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), &oinfo, 1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ set it up
         * to date until commit_write */
        if (from == 0 && to == CFS_PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, there is no need to read the old
         * data.  The extent locking will have updated the KMS, and for our
         * purposes here we can treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kaddr, 0, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}
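
/*
 * Illustrative sketch, not part of the original source: the three cases
 * ll_prepare_write() distinguishes before a partial-page write, with the
 * kms, offset and page size passed in explicitly.  The enum and function
 * names are hypothetical.
 */
#if 0   /* example only, never compiled */
enum prep_action { PREP_NOTHING, PREP_ZERO_FILL, PREP_READ_OLD };

static enum prep_action prep_write_action(unsigned from, unsigned to,
                                          unsigned long long kms,
                                          unsigned long long offset,
                                          unsigned page_size)
{
        if (from == 0 && to == page_size)
                return PREP_NOTHING;    /* full overwrite; data arrives in
                                         * commit_write */
        if (kms <= offset)
                return PREP_ZERO_FILL;  /* page lies beyond the known size */
        return PREP_READ_OLD;           /* must read the old contents first */
}
#endif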

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LASSERT(!PageWriteback(page));

        /* If we left PageDirty set, we might get another writepage call
         * in the future.  List walkers are bright enough to check the page
         * dirty bit, so we can leave the page on whatever list it's on.
         * XXX also, we're called with the cli list lock held, so if we took
         * the page-cache list lock here we'd create a lock inversion with
         * the removepage path, which takes the page lock and then the cli
         * lock. */
        LASSERTF(!PageWriteback(page), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);
        clear_page_dirty_for_io(page);

        /* This actually clears the dirty bit in the radix tree. */
        set_page_writeback(page);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.  (No further races are possible,
 *    since by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves both purposes, as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
                return kms % CFS_PAGE_SIZE;

        return CFS_PAGE_SIZE;
}
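
/*
 * Illustrative sketch, not part of the original source: the kms-based
 * length decision made by ll_ap_refresh_count() above, stripped of the
 * locking and struct plumbing.  The function name is hypothetical.
 */
#if 0   /* example only, never compiled */
static unsigned refresh_count_sketch(unsigned long long page_off,
                                     unsigned long long kms,
                                     unsigned page_size)
{
        if (page_off >= kms)
                return 0;               /* raced with truncate: write nothing */
        if (page_off + page_size > kms)
                return kms % page_size; /* sub-page write at EOF */
        return page_size;               /* whole page */
}
#endif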

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_ioepoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLUID | OBD_MD_FLGID |
                        OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}

static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
                              obd_valid valid)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        obdo_from_inode(oa, llap->llap_page->mapping->host, valid);

        EXIT;
}

static struct obd_capa *ll_ap_lookup_capa(void *data, int cmd)
{
        struct ll_async_page *llap = LLAP_FROM_COOKIE(data);
        int opc = cmd & OBD_BRW_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;

        return ll_osscapa_get(llap->llap_page->mapping->host, opc);
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_update_obdo =       ll_ap_update_obdo,
        .ap_completion =        ll_ap_completion,
        .ap_lookup_capa =       ll_ap_lookup_capa,
};

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached to every page in lustre, linked off @sbi.
 * We add a dummy llap to the list so we don't lose our place during list
 * walking.  If llaps in the list are being moved they will only move to
 * the end of the LRU, and we aren't terribly interested in those pages
 * here (we start at the beginning of the list, where the least-used
 * llaps are).
 */
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - CFS_PAGE_SHIFT))
                want = 40 << (20 - CFS_PAGE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        while (total-- > 0 && count < want) {
                struct page *page;
                int keep;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,
                                              &dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                keep = (llap->llap_write_queued || PageDirty(page) ||
                        PageWriteback(page) || (!PageUptodate(page) &&
                        llap->llap_origin != LLAP_ORIGIN_READAHEAD));

                LL_CDEBUG_PAGE(D_PAGE, page,
                               "%s LRU page: %s%s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               PageWriteback(page) ? "wb" : "",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                          (__u64)page->index << CFS_PAGE_SHIFT,
                                          ((__u64)page->index <<
                                           CFS_PAGE_SHIFT) | ~CFS_PAGE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                ++count;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page,
                                               "Not dropping page because it "
                                               "is %s\n", PageDirty(page) ?
                                               "dirty" : "mapped");
                        }
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}
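
/*
 * Illustrative sketch, not part of the original source: how the shrink
 * target of llap_shrink_cache() is derived, including the 40MB-per-call
 * cap described in the comment above.  The function name is hypothetical.
 */
#if 0   /* example only, never compiled */
static unsigned long shrink_want_sketch(unsigned long page_count,
                                        unsigned long page_max,
                                        int fraction, unsigned page_shift)
{
        unsigned long want;

        if (fraction == 0)
                want = page_count - page_max + 32;  /* just get under the cap */
        else
                want = (page_count + fraction - 1) / fraction;

        if (want > 40UL << (20 - page_shift))
                want = 40UL << (20 - page_shift);   /* at most 40MB of pages */
        return want;
}
#endif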

struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (!inode) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "page received\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }
        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2dtexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << CFS_PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;
        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        INIT_LIST_HEAD(&llap->llap_pending_write);
        spin_unlock(&sbi->ll_lock);

 out:
        if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
                __u32 csum = 0;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

        llap->llap_origin = origin;
        RETURN(llap);
}

static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;
        /* Do not pass llap here, as this is a sync write. */
        llap_write_pending(inode, NULL);

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE &&
                   llap->llap_page->index == size_index) {
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum = 0;
                struct page *page = llap->llap_page;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page)) {
                        end_page_writeback(llap->llap_page);
                }
        }

        if (rc == 0 && llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

free_oig:
        oig_release(oig);
out:
        RETURN(rc);
}
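
/*
 * Illustrative sketch, not part of the original source: how
 * queue_or_sync_write() widens a sync write (bug 4410).  It assumes
 * CFS_PAGE_MASK is ~(CFS_PAGE_SIZE - 1), so isize & (page_size - 1)
 * matches the i_size_read(inode) & ~CFS_PAGE_MASK above.  The function
 * name is hypothetical.
 */
#if 0   /* example only, never compiled */
static unsigned sync_write_to_sketch(unsigned to, unsigned long index,
                                     unsigned long size_index,
                                     unsigned long long isize,
                                     unsigned page_size)
{
        if (to == page_size)
                return to;                       /* already a full page */
        if (index < size_index)
                return page_size;                /* before EOF: round up */
        if (index == size_index) {
                unsigned size_to = isize & (page_size - 1);
                if (to < size_to)
                        return size_to;          /* extend to EOF in-page */
        }
        return to;                               /* past EOF: leave as-is */
}
#endif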

/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /*
         * queue a write for some time in the future the first time we
         * dirty the page.
         *
         * This is different from what other file systems do: they usually
         * just mark the page (and some of its buffers) dirty and rely on
         * balance_dirty_pages() to start a write-back.  Lustre wants
         * write-back to be started earlier for the following reasons:
         *
         *     (1) with a large number of clients we need to limit the amount
         *     of cached data on the clients a lot;
         *
         *     (2) large compute jobs generally want compute-only then io-only
         *     and the IO should complete as quickly as possible;
         *
         *     (3) IO is batched up to the RPC size and is async until the
         *     client max cache is hit
         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
         */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* put the page in the page cache; from now on ll_removepage is
         * responsible for cleaning up the llap.  Only set the page dirty
         * when it has been queued to be written out. */
        if (llap->llap_write_queued)
                set_page_dirty(page);

out:
        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* this page is beyond the pale of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revocation.  We must
                 * tear down our book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}
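
/*
 * Illustrative sketch, not part of the original source: the reservation
 * scheme behind ll_ra_count_get()/ll_ra_count_put() above.  Readahead
 * takes as many pages as still fit under the global budget and must hand
 * back whatever it did not use.  Struct and function names are
 * hypothetical.
 */
#if 0   /* example only, never compiled */
struct ra_budget { unsigned long cur, max; };

static unsigned long ra_reserve(struct ra_budget *ra, unsigned long len)
{
        unsigned long room = ra->max - ra->cur;
        unsigned long got = len < room ? len : room;

        ra->cur += got;         /* grab what fits under the cap */
        return got;
}

static void ra_release(struct ra_budget *ra, unsigned long len)
{
        ra->cur -= len;         /* hand back the unused part */
}
#endif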

/* called for each page in a completed rpc. */
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        int ret = 0;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page, cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0)  {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                }
                SetPageError(page);
                if (rc == -ENOSPC)
                        set_bit(AS_ENOSPC, &page->mapping->flags);
                else
                        set_bit(AS_EIO, &page->mapping->flags);
        }

        unlock_page(page);

        if (cmd & OBD_BRW_WRITE) {
                /* Only when rc == 0 (the write succeeded) may this page be
                 * removed from the pending_writing list. */
                if (rc == 0 && llap_write_complete(page->mapping->host, llap))
                        ll_queue_done_writing(page->mapping->host, 0);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        RETURN(ret);
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        if (llap_write_complete(inode, llap))
                ll_queue_done_writing(inode, 0);

        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback.. */
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}

static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (unlikely(fd_flags & LL_FILE_GROUP_LOCKED))
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << CFS_PAGE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + CFS_PAGE_SIZE - 1;
        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (!(fd_flags & LL_FILE_READAHEAD))
                flags |= LDLM_FL_CBPENDING;
        matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
                                                  ASYNC_READY | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
               ras->ras_consecutive_pages, ras->ras_window_start,            \
               ras->ras_window_len, ras->ras_next_readahead,                 \
               ras->ras_requests, ras->ras_request_index);

static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
}

static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data       *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}

void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        ras->ras_requests++;
        ras->ras_request_index = 0;
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;

        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}

static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}

struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read         *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);
        return bead;
}

static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        unsigned int gfp_mask;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* Enlarge the RA window to encompass the full read */
        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
            bead->lrr_start + bead->lrr_count) {
                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
                                      ras->ras_window_start;
        }
        /* Reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (end != 0) {
                /* Truncate RA window to end of file */
                end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
                if (page == NULL) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* Check if page was truncated or reclaimed */
                if (page->mapping != mapping) {
                        ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                        goto next_page;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags|LL_FILE_READAHEAD)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "started read-ahead\n");
                } else {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
        if (i == end + 1 && end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}

static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~((1024 * 1024 >> CFS_PAGE_SHIFT) - 1));
}
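
/*
 * Illustrative sketch, not part of the original source: ras_set_start()
 * above aligns the window start down to a 1MB boundary, expressed in
 * pages.  With 4k pages the chunk is 256 pages, so index 300 maps to a
 * window start of 256.  The function name is hypothetical.
 */
#if 0   /* example only, never compiled */
static unsigned long ra_window_start_sketch(unsigned long index,
                                            unsigned page_shift)
{
        unsigned long chunk = (1024 * 1024) >> page_shift; /* pages per MB */

        return index & ~(chunk - 1);
}
#endif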

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive_requests = 0;
        ras->ras_consecutive_pages = 0;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start, index);

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        ras->ras_requests = 0;
        INIT_LIST_HEAD(&ras->ras_read_beads);
}
1307
1308 static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1309                        struct ll_readahead_state *ras, unsigned long index,
1310                        unsigned hit)
1311 {
1312         struct ll_ra_info *ra = &sbi->ll_ra_info;
1313         int zero = 0;
1314         ENTRY;
1315
1316         spin_lock(&sbi->ll_lock);
1317         spin_lock(&ras->ras_lock);
1318
1319         ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
1320
1321         /* reset the read-ahead window in two cases.  First when the app seeks
1322          * or reads to some other part of the file.  Secondly if we get a
1323          * read-ahead miss that we think we've previously issued.  This can
1324          * be a symptom of there being so many read-ahead pages that the VM is
1325          * reclaiming it before we get to it. */
1326         if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
1327                 zero = 1;
1328                 ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
1329         } else if (!hit && ras->ras_window_len &&
1330                    index < ras->ras_next_readahead &&
1331                    index_in_window(index, ras->ras_window_start, 0,
1332                                    ras->ras_window_len)) {
1333                 zero = 1;
1334                 ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
1335         }
1336
1337         /* On the second access to a file smaller than the tunable
1338          * ra_max_read_ahead_whole_pages trigger RA on all pages in the
1339          * file up to ra_max_pages.  This is simply a best effort and
1340          * only occurs once per open file.  Normal RA behavior is reverted
1341          * to for subsequent IO.  The mmap case does not increment
1342          * ras_requests and thus can never trigger this behavior. */
1343         if (ras->ras_requests == 2 && !ras->ras_request_index) {
1344                 __u64 kms_pages;
1345
1346                 kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
1347                             CFS_PAGE_SHIFT;
1348
1349                 CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
1350                        ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);
1351
1352                 if (kms_pages &&
1353                     kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1354                         ras->ras_window_start = 0;
1355                         ras->ras_last_readpage = 0;
1356                         ras->ras_next_readahead = 0;
1357                         ras->ras_window_len = min(ra->ra_max_pages,
1358                                 ra->ra_max_read_ahead_whole_pages);
1359                         GOTO(out_unlock, 0);
1360                 }
1361         }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive_pages++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* Trigger RA in the mmap case, where ras_consecutive_requests
         * is not incremented and thus can't be used to trigger RA. */
        if (!ras->ras_window_len && ras->ras_consecutive_pages == 3) {
                ras->ras_window_len = 1024 * 1024 >> CFS_PAGE_SHIFT;
                GOTO(out_unlock, 0);
        }

        /* The initial ras_window_len is set to the request size.  To avoid
         * uselessly reading and discarding pages for random IO, the window
         * is only increased once per consecutive request received. */
        if (ras->ras_consecutive_requests > 1 && !ras->ras_request_index) {
                ras->ras_window_len = min(ras->ras_window_len +
                                          (1024 * 1024 >> CFS_PAGE_SHIFT),
                                          ra->ra_max_pages);
        }

        EXIT;
out_unlock:
        RAS_CDEBUG(ras);
        ras->ras_request_index++;
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
        return;
}
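
/*
 * A worked example of the window arithmetic above (an illustration only,
 * not code from this file): with 4kB pages CFS_PAGE_SHIFT is 12, so
 * 1024 * 1024 >> CFS_PAGE_SHIFT is 256 pages.  The mmap trigger therefore
 * opens a 1MB window, and each further consecutive request grows the
 * window by another 1MB worth of pages until ra_max_pages clamps it.
 */
#if 0
/* Illustrative sketch only; RAS_WINDOW_GROWTH and ras_grown_window() are
 * hypothetical names, not part of llite. */
#define RAS_WINDOW_GROWTH (1024 * 1024 >> CFS_PAGE_SHIFT) /* 256 pages @ 4kB */

static unsigned long ras_grown_window(unsigned long window_len,
                                      unsigned long ra_max_pages)
{
        /* mirrors the per-consecutive-request growth step in ras_update() */
        return min(window_len + RAS_WINDOW_GROWTH, ra_max_pages);
}
#endif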

int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        LASSERT(!PageWriteback(page));
        set_page_writeback(page);

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
                                         ASYNC_READY | ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
out:
        if (rc) {
                if (!lli->lli_async_rc)
                        lli->lli_async_rc = rc;
                /* re-dirty the page on error so the write is retried */
                if (PageWriteback(page)) {
                        end_page_writeback(page);
                }
                /* only resend the page if its IO has not already started */
                if (!PageError(page))
                        ll_redirty_page(page);
                unlock_page(page);
        }
        RETURN(rc);
}
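
/*
 * Note that ll_writepage() above uses the 2.4-style prototype that takes
 * only the page.  On 2.6 kernels ->writepage also receives a struct
 * writeback_control, so a compat wrapper along the lines of the sketch
 * below is needed.  This is an illustration only: the real glue lives in
 * the kernel-version compat code, and the wrapper name is hypothetical.
 */
#if 0
static int ll_writepage_26_sketch(struct page *page,
                                  struct writeback_control *wbc)
{
        /* llite drives its own write batching, so the hints in *wbc can
         * simply be ignored here */
        return ll_writepage(page);
}
#endif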

/*
 * For now we do our readpage the same on both 2.4 and 2.6.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache,
 * checking for dlm lock coverage.  The main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
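/*
 * A concrete example of why kernel read-ahead is unsafe here (the numbers
 * are illustrative): if this client holds a dlm extent lock covering only
 * [0, 1MB) of a 4MB file, i_size still reads as 4MB, so the kernel's
 * read-ahead would happily issue readpage against pages in [1MB, 4MB)
 * that no lock covers.  Walking the page cache ourselves lets us stop
 * the read-ahead at the lock boundary instead.
 */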
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode,
               (((loff_t)page->index) << CFS_PAGE_SHIFT),
               (((loff_t)page->index) << CFS_PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        if (!ll_i2info(inode)->lli_smd) {
                /* File with no objects - one big hole.  We only call
                 * ll_truncate_complete_page() because remove_from_page_cache()
                 * is not exported; we bring the page back up to date
                 * ourselves below. */
                ll_truncate_complete_page(page);
                clear_page(kmap(page));
                kunmap(page);
                SetPageUptodate(page);
                unlock_page(page);
                RETURN(0);
        }

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                /* This is the call path taken when the page came from
                 * read-ahead */
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        if (likely((fd->fd_flags & LL_FILE_IGNORE_LOCK) == 0)) {
                rc = ll_page_matches(page, fd->fd_flags);
                if (rc < 0) {
                        LL_CDEBUG_PAGE(D_ERROR, page,
                                       "lock match failed: rc %d\n", rc);
                        GOTO(out, rc);
                }

                if (rc == 0) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                              "a lock (mmap?).  Check the debug logs.\n",
                              inode->i_ino, page->index,
                              (long long)page->index << CFS_PAGE_SHIFT);
                }
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        /* We have just requested the actual page we want; see if we can
         * tack some read-ahead onto that page's RPC before it is sent. */
        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        RETURN(rc);
}
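
/*
 * For reference, a sketch of how these entry points are typically wired
 * into the address_space_operations table.  Illustrative only: the real
 * table, along with the per-kernel-version writepage glue, lives
 * elsewhere in llite, and ll_writepage_26_sketch() is the hypothetical
 * wrapper shown after ll_writepage() above.
 */
#if 0
struct address_space_operations ll_aops_sketch = {
        .readpage  = ll_readpage,            /* 2.6 prototype matches */
        .writepage = ll_writepage_26_sketch, /* 2.6 needs the wbc wrapper */
};
#endif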