lustre/llite/rw.c (fs/lustre-release.git)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Lite I/O page cache routines shared by different kernel revs
5  *
6  *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #include <linux/config.h>
25 #include <linux/kernel.h>
26 #include <linux/mm.h>
27 #include <linux/string.h>
28 #include <linux/stat.h>
29 #include <linux/errno.h>
30 #include <linux/smp_lock.h>
31 #include <linux/unistd.h>
32 #include <linux/version.h>
33 #include <asm/system.h>
34 #include <asm/uaccess.h>
35
36 #include <linux/fs.h>
37 #include <linux/stat.h>
38 #include <asm/uaccess.h>
39 #include <asm/segment.h>
40 #include <linux/mm.h>
41 #include <linux/pagemap.h>
42 #include <linux/smp_lock.h>
43
44 #define DEBUG_SUBSYSTEM S_LLITE
45
46 #include <linux/lustre_mds.h>
47 #include <linux/lustre_lite.h>
48 #include "llite_internal.h"
49 #include <linux/lustre_compat25.h>
50
51 #ifndef list_for_each_prev_safe
52 #define list_for_each_prev_safe(pos, n, head) \
53         for (pos = (head)->prev, n = pos->prev; pos != (head); \
54                 pos = n, n = pos->prev )
55 #endif
56
57 kmem_cache_t *ll_async_page_slab = NULL;
58 size_t ll_async_page_slab_size = 0;
59
60 /* SYNCHRONOUS I/O to object storage for an inode */
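/*
 * ll_brw() packs a single brw_page for @page and calls obd_brw() directly,
 * blocking until the OST round trip completes; on success the returned obdo
 * refreshes the inode's block count.  For writes that touch the last page of
 * the file the transfer count is trimmed to i_size: e.g. (assuming 4KB
 * pages) with i_size == 6000, writing the page at index 1 sends only
 * 6000 % 4096 == 1904 bytes instead of a full PAGE_SIZE.
 */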
61 static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
62                   struct page *page, int flags)
63 {
64         struct ll_inode_info *lli = ll_i2info(inode);
65         struct lov_stripe_md *lsm = lli->lli_smd;
66         struct brw_page pg;
67         int rc;
68         ENTRY;
69
70         pg.pg = page;
71         pg.off = ((obd_off)page->index) << PAGE_SHIFT;
72
73         if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
74                 pg.count = inode->i_size % PAGE_SIZE;
75         else
76                 pg.count = PAGE_SIZE;
77
78         LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
79                        cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
80                        inode->i_ino, pg.off, pg.off);
81         if (pg.count == 0) {
82                 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
83                        LPU64"\n",
84                        inode->i_ino, inode, inode->i_size, page->mapping->host,
85                        page->mapping->host->i_size, page->index, pg.off);
86         }
87
88         pg.flag = flags;
89
90         if (cmd == OBD_BRW_WRITE)
91                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
92                                     LPROC_LL_BRW_WRITE, pg.count);
93         else
94                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
95                                     LPROC_LL_BRW_READ, pg.count);
96         rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);
97         if (rc == 0)
98                 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
99         else if (rc != -EIO)
100                 CERROR("error from obd_brw: rc = %d\n", rc);
101         RETURN(rc);
102 }
103
104 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
105
106 /* this isn't where truncate starts.   roughly:
107  * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate
108  * we grab the lock back in setattr_raw to avoid races. */
109 void ll_truncate(struct inode *inode)
110 {
111         struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
112         struct obdo oa;
113         int rc;
114         ENTRY;
115         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %llu\n", inode->i_ino,
116                inode->i_generation, inode, inode->i_size);
117
118         if (!lsm) {
119                 CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
120                        inode->i_ino);
121                 EXIT;
122                 return;
123         }
124
125         if (lov_merge_size(lsm, 0) == inode->i_size) {
126                 CDEBUG(D_VFSTRACE, "skipping punch for "LPX64" (size = %llu)\n",
127                        lsm->lsm_object_id, inode->i_size);
128         } else {
129                 CDEBUG(D_INFO, "calling punch for "LPX64" (new size %llu)\n",
130                        lsm->lsm_object_id, inode->i_size);
131
132                 oa.o_id = lsm->lsm_object_id;
133                 oa.o_valid = OBD_MD_FLID;
134                 obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
135                                 OBD_MD_FLATIME |OBD_MD_FLMTIME |OBD_MD_FLCTIME);
136
137                 /* truncate == punch from new size to absolute end of file */
138                 /* NB: must call obd_punch with i_sem held!  It updates kms! */
139                 rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
140                                OBD_OBJECT_EOF, NULL);
141                 if (rc)
142                         CERROR("obd_truncate fails (%d) ino %lu\n", rc,
143                                inode->i_ino);
144                 else
145                         obdo_to_inode(inode, &oa, OBD_MD_FLSIZE|OBD_MD_FLBLOCKS|
146                                       OBD_MD_FLATIME | OBD_MD_FLMTIME |
147                                       OBD_MD_FLCTIME);
148         }
149
150         EXIT;
151         return;
152 } /* ll_truncate */
153
154 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
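/*
 * ->prepare_write() for llite, roughly: an OBD_BRW_CHECK call first decides
 * whether we should fail with -EIO right away; a full overwrite (from == 0,
 * to == PAGE_SIZE) skips the read-in and stays !uptodate until commit_write;
 * a page lying wholly at or beyond the known minimum size (kms) is zero
 * filled; otherwise the old contents are read in synchronously via ll_brw().
 */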
155 int ll_prepare_write(struct file *file, struct page *page, unsigned from,
156                      unsigned to)
157 {
158         struct inode *inode = page->mapping->host;
159         struct ll_inode_info *lli = ll_i2info(inode);
160         struct lov_stripe_md *lsm = lli->lli_smd;
161         obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
162         struct brw_page pga;
163         struct obdo oa;
164         __u64 kms;
165         int rc = 0;
166         ENTRY;
167
168         LASSERT(PageLocked(page));
169         (void)llap_cast_private(page); /* assertion */
170
171         /* Check to see if we should return -EIO right away */
172         pga.pg = page;
173         pga.off = offset;
174         pga.count = PAGE_SIZE;
175         pga.flag = 0;
176
177         oa.o_id = lsm->lsm_object_id;
178         oa.o_mode = inode->i_mode;
179         oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
180
181         rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,
182                      NULL);
183         if (rc)
184                 RETURN(rc);
185
186         if (PageUptodate(page)) {
187                 LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
188                 RETURN(0);
189         }
190
191         /* We're completely overwriting an existing page, so _don't_ mark it
192          * uptodate until commit_write */
193         if (from == 0 && to == PAGE_SIZE) {
194                 LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
195                 POISON_PAGE(page, 0x11);
196                 RETURN(0);
197         }
198
199         /* If we are writing to a new page, no need to read old data.  The extent
200          * locking will have updated the KMS, and for our purposes here we can
201          * treat it like i_size. */
202         kms = lov_merge_size(lsm, 1);
203         if (kms <= offset) {
204                 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
205                                kms, offset);
206                 memset(kmap(page), 0, PAGE_SIZE);
207                 kunmap(page);
208                 GOTO(prepare_done, rc = 0);
209         }
210
211         /* XXX could be an async ocp read.. read-ahead? */
212         rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
213         if (rc == 0) {
214                 /* bug 1598: don't clobber blksize */
215                 oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
216                 obdo_refresh_inode(inode, &oa, oa.o_valid);
217         }
218
219         EXIT;
220  prepare_done:
221         if (rc == 0)
222                 SetPageUptodate(page);
223
224         return rc;
225 }
226
227 struct ll_async_page *llap_from_cookie(void *cookie)
228 {
229         struct ll_async_page *llap = cookie;
230         if (llap->llap_magic != LLAP_MAGIC)
231                 return ERR_PTR(-EINVAL);
232         return llap;
233 }
234
235 static int ll_ap_make_ready(void *data, int cmd)
236 {
237         struct ll_async_page *llap;
238         struct page *page;
239         ENTRY;
240
241         llap = llap_from_cookie(data);
242         if (IS_ERR(llap))
243                 RETURN(-EINVAL);
244
245         page = llap->llap_page;
246
247         LASSERT(cmd != OBD_BRW_READ);
248
249         /* we're trying to write, but the page is locked.. come back later */
250         if (TryLockPage(page))
251                 RETURN(-EAGAIN);
252
253         LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
254         page_cache_get(page);
255
256         /* if we left PageDirty we might get another writepage call
257          * in the future.  list walkers are bright enough
258          * to check page dirty so we can leave it on whatever list
259          * it's on.  XXX also, we're called with the cli list so if
260          * we got the page cache list we'd create a lock inversion
261          * with the removepage path which gets the page lock then the
262          * cli lock */
263         clear_page_dirty(page);
264         RETURN(0);
265 }
266
267 /* We have two reasons for giving llite the opportunity to change the
268  * write length of a given queued page as it builds the RPC containing
269  * the page:
270  *
271  * 1) Further extending writes may have landed in the page cache
272  *    since a partial write first queued this page requiring us
273  *    to write more from the page cache.
274  * 2) We might have raced with truncate and want to avoid performing
275  *    write RPCs that are just going to be thrown away by the
276  *    truncate's punch on the storage targets.
277  *
278  * The kms serves these purposes as it is set at both truncate and extending
279  * writes.
280  */
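/*
 * Worked example (assuming 4KB pages, kms == 6000): a queued write of the
 * page at index 0 is refreshed to a full PAGE_SIZE, the page at index 1 to
 * kms % PAGE_SIZE == 1904 bytes, and a page at index 2 or beyond to 0 bytes,
 * since truncate has already made it irrelevant.
 */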
281 static int ll_ap_refresh_count(void *data, int cmd)
282 {
283         struct ll_async_page *llap;
284         struct lov_stripe_md *lsm;
285         struct page *page;
286         __u64 kms;
287         ENTRY;
288
289         /* readpage queues with _COUNT_STABLE, shouldn't get here. */
290         LASSERT(cmd != OBD_BRW_READ);
291
292         llap = llap_from_cookie(data);
293         if (IS_ERR(llap))
294                 RETURN(PTR_ERR(llap));
295
296         page = llap->llap_page;
297         lsm = ll_i2info(page->mapping->host)->lli_smd;
298         kms = lov_merge_size(lsm, 1);
299
300         /* catch race with truncate */
301         if (((__u64)page->index << PAGE_SHIFT) >= kms)
302                 return 0;
303
304         /* catch sub-page write at end of file */
305         if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
306                 return kms % PAGE_SIZE;
307
308         return PAGE_SIZE;
309 }
310
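/*
 * Fill @oa for an RPC against @inode: the object id is always stamped;
 * writes additionally carry the fid, the current io epoch (via o_easize)
 * and mtime/ctime on top of the type and atime taken from the inode.
 */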
311 void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
312 {
313         struct lov_stripe_md *lsm;
314         obd_flag valid_flags;
315
316         lsm = ll_i2info(inode)->lli_smd;
317
318         oa->o_id = lsm->lsm_object_id;
319         oa->o_valid = OBD_MD_FLID;
320         valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
321         if (cmd == OBD_BRW_WRITE) {
322                 oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
323                 mdc_pack_fid(obdo_fid(oa), inode->i_ino, 0, inode->i_mode);
324                 oa->o_easize = ll_i2info(inode)->lli_io_epoch;
325
326                 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
327         }
328
329         obdo_from_inode(oa, inode, valid_flags);
330 }
331
332 static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
333 {
334         struct ll_async_page *llap;
335         ENTRY;
336
337         llap = llap_from_cookie(data);
338         if (IS_ERR(llap)) {
339                 EXIT;
340                 return;
341         }
342
343         ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
344         EXIT;
345 }
346
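/*
 * Callbacks handed to the OSC's async io engine with each page: it calls
 * ap_make_ready to lock and clean a page as it is pulled into an RPC,
 * ap_refresh_count to re-check how many bytes are still worth writing,
 * ap_fill_obdo to stamp the request, and ap_completion when the RPC ends.
 */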
347 static struct obd_async_page_ops ll_async_page_ops = {
348         .ap_make_ready =        ll_ap_make_ready,
349         .ap_refresh_count =     ll_ap_refresh_count,
350         .ap_fill_obdo =         ll_ap_fill_obdo,
351         .ap_completion =        ll_ap_completion,
352 };
353
354 struct ll_async_page *llap_cast_private(struct page *page)
355 {
356         struct ll_async_page *llap = (struct ll_async_page *)page->private;
357
358         LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
359                  "page %p private %lu gave magic %d which != %d\n",
360                  page, page->private, llap->llap_magic, LLAP_MAGIC);
361
362         return llap;
363 }
364
365 /* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
366  *
367  * There is an llap attached onto every page in lustre, linked off @sbi.
368  * We add an llap to the list so we don't lose our place during list walking.
369  * If llaps in the list are being moved they will only move to the end
370  * of the LRU, and we aren't terribly interested in those pages here (we
371  * start at the beginning of the list where the least-used llaps are).
372  */
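/*
 * Walk sketch: dummy_llap is (re)inserted just after each llap we visit, the
 * spinlock can then be dropped while the victim page is torn down, and the
 * walk resumes from the dummy.  With 4KB pages the per-call cap below works
 * out to 40 << (20 - 12) == 10240 pages, i.e. 40MB of cache per call.
 */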
373 int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
374 {
375         struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
376         unsigned long total, want, count = 0;
377
378         total = sbi->ll_async_page_count;
379
380         /* There can be a large number of llaps (600k or more in a large
381          * memory machine) so the VM 1/6 shrink ratio is likely too much.
382          * Since we are freeing pages also, we don't necessarily want to
383          * shrink so much.  Limit to 40MB of pages + llaps per call. */
384         if (shrink_fraction == 0)
385                 want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
386         else
387                 want = (total + shrink_fraction - 1) / shrink_fraction;
388
389         if (want > 40 << (20 - PAGE_CACHE_SHIFT))
390                 want = 40 << (20 - PAGE_CACHE_SHIFT);
391
392         CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
393                want, total, shrink_fraction);
394
395         spin_lock(&sbi->ll_lock);
396         list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
397
398         while (total-- > 0 && count < want) {
399                 struct page *page;
400
401                 if (unlikely(need_resched())) {
402                         spin_unlock(&sbi->ll_lock);
403                         cond_resched();
404                         spin_lock(&sbi->ll_lock);
405                 }
406
407                 llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
408                 list_del_init(&dummy_llap.llap_pglist_item);
409                 if (llap == NULL)
410                         break;
411
412                 page = llap->llap_page;
413                 LASSERT(page != NULL);
414
415                 list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
416
417                 /* Page needs/undergoing IO */
418                 if (TryLockPage(page)) {
419                         LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
420                         continue;
421                 }
422
423                 /* If page is dirty or undergoing IO don't discard it */
424                 if (llap->llap_write_queued || PageDirty(page) ||
425                     (!PageUptodate(page) &&
426                      llap->llap_origin != LLAP_ORIGIN_READAHEAD)) {
427                         unlock_page(page);
428                         LL_CDEBUG_PAGE(D_PAGE, page, "can't drop from cache: "
429                                        "%s%s%s%s origin %s\n",
430                                        llap->llap_write_queued ? "wq " : "",
431                                        PageDirty(page) ? "pd " : "",
432                                        PageUptodate(page) ? "" : "!pu ",
433                                        llap->llap_defer_uptodate ? "" : "!du",
434                                        llap_origins[llap->llap_origin]);
435                         continue;
436                 }
437
438                 page_cache_get(page);
439                 spin_unlock(&sbi->ll_lock);
440
441                 ++count;
442                 LL_CDEBUG_PAGE(D_PAGE, page, "drop from cache %lu/%lu\n",
443                                count, want);
444                 if (page->mapping != NULL) {
445                         ll_ra_accounting(page, page->mapping);
446                         ll_truncate_complete_page(page);
447                 }
448                 unlock_page(page);
449                 page_cache_release(page);
450
451                 spin_lock(&sbi->ll_lock);
452         }
453         list_del(&dummy_llap.llap_pglist_item);
454         spin_unlock(&sbi->ll_lock);
455
456         CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
457                count, want, total);
458
459         return count;
460 }
461
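/*
 * Find the llap attached to @page, creating and registering one with the
 * OSC (obd_prep_async_page) if this is the first time we have seen the
 * page.  Existing llaps are simply moved to the tail of the per-sb LRU;
 * new ones may first force llap_shrink_cache() if we are over the limit.
 * The async cookie is carved out of the same slab object, directly after
 * the llap itself.
 */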
462 struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
463 {
464         struct ll_async_page *llap;
465         struct obd_export *exp;
466         struct inode *inode = page->mapping->host;
467         struct ll_sb_info *sbi = ll_i2sbi(inode);
468         int rc;
469         ENTRY;
470
471         LASSERT(ll_async_page_slab);
472         LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
473
474         llap = llap_cast_private(page);
475         if (llap != NULL) {
476                 /* move to end of LRU list */
477                 spin_lock(&sbi->ll_lock);
478                 sbi->ll_pglist_gen++;
479                 list_del_init(&llap->llap_pglist_item);
480                 list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
481                 spin_unlock(&sbi->ll_lock);
482                 GOTO(out, llap);
483         }
484
485         exp = ll_i2obdexp(page->mapping->host);
486         if (exp == NULL)
487                 RETURN(ERR_PTR(-EINVAL));
488
489         /* limit the number of lustre-cached pages */
490         if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
491                 llap_shrink_cache(sbi, 0);
492
493         OBD_SLAB_ALLOC(llap, ll_async_page_slab, SLAB_KERNEL,
494                        ll_async_page_slab_size);
495         if (llap == NULL)
496                 RETURN(ERR_PTR(-ENOMEM));
497         llap->llap_magic = LLAP_MAGIC;
498         llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
499         rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
500                                  (obd_off)page->index << PAGE_SHIFT,
501                                  &ll_async_page_ops, llap, &llap->llap_cookie);
502         if (rc) {
503                 OBD_SLAB_FREE(llap, ll_async_page_slab,
504                               ll_async_page_slab_size);
505                 RETURN(ERR_PTR(rc));
506         }
507
508         CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
509                page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
510         /* also zeroing the PRIVBITS low order bitflags */
511         __set_page_ll_data(page, llap);
512         llap->llap_page = page;
513
514         spin_lock(&sbi->ll_lock);
515         sbi->ll_pglist_gen++;
516         sbi->ll_async_page_count++;
517         list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
518         spin_unlock(&sbi->ll_lock);
519
520 out:
521         llap->llap_origin = origin;
522         RETURN(llap);
523 }
524
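/*
 * Queue @llap for asynchronous write-out; if the async queue refuses it we
 * fall back to a synchronous group write (oig) and wait for it here.  For
 * the sync path a sub-page write is widened to a full page unless the page
 * is the one holding EOF (bug 4410), in which case we write up to i_size.
 */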
525 static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
526                                struct ll_async_page *llap,
527                                unsigned to, obd_flag async_flags)
528 {
529         unsigned long size_index = inode->i_size >> PAGE_SHIFT;
530         struct obd_io_group *oig;
531         int rc;
532         ENTRY;
533
534         /* _make_ready only sees llap once we've unlocked the page */
535         llap->llap_write_queued = 1;
536         rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
537                                 llap->llap_cookie, OBD_BRW_WRITE, 0, 0, 0,
538                                 async_flags);
539         if (rc == 0) {
540                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
541                 //llap_write_pending(inode, llap);
542                 GOTO(out, 0);
543         }
544
545         llap->llap_write_queued = 0;
546
547         rc = oig_init(&oig);
548         if (rc)
549                 GOTO(out, rc);
550
551         /* make full-page requests if we are not at EOF (bug 4410) */
552         if (to != PAGE_SIZE && llap->llap_page->index < size_index) {
553                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
554                                "sync write before EOF: size_index %lu, to %d\n",
555                                size_index, to);
556                 to = PAGE_SIZE;
557         } else if (to != PAGE_SIZE && llap->llap_page->index == size_index) {
558                 int size_to = inode->i_size & ~PAGE_MASK;
559                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
560                                "sync write at EOF: size_index %lu, to %d/%d\n",
561                                size_index, to, size_to);
562                 if (to < size_to)
563                         to = size_to;
564         }
565
566         rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
567                                 llap->llap_cookie, OBD_BRW_WRITE, 0, to, 0,
568                                 ASYNC_READY | ASYNC_URGENT |
569                                 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
570         if (rc)
571                 GOTO(free_oig, rc);
572
573         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
574         if (rc)
575                 GOTO(free_oig, rc);
576
577         rc = oig_wait(oig);
578
579         if (!rc && async_flags & ASYNC_READY)
580                 unlock_page(llap->llap_page);
581
582         LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
583
584 free_oig:
585         oig_release(oig);
586 out:
587         RETURN(rc);
588 }
589
590 /* update our write count to account for i_size increases that may have
591  * happened since we've queued the page for io. */
592
593 /* be careful not to return success without setting the page Uptodate or
594  * the next pass through prepare_write will read in stale data from disk. */
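/*
 * E.g. (assuming 4KB pages) committing a write that ends at offset to == 100
 * within the page at index 1 yields size == (1 << PAGE_SHIFT) + 100 == 4196;
 * kms is raised to that value and, if it exceeds the old i_size, so is
 * i_size itself.
 */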
595 int ll_commit_write(struct file *file, struct page *page, unsigned from,
596                     unsigned to)
597 {
598         struct inode *inode = page->mapping->host;
599         struct ll_inode_info *lli = ll_i2info(inode);
600         struct lov_stripe_md *lsm = lli->lli_smd;
601         struct obd_export *exp;
602         struct ll_async_page *llap;
603         loff_t size;
604         int rc = 0;
605         ENTRY;
606
607         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
608         LASSERT(inode == file->f_dentry->d_inode);
609         LASSERT(PageLocked(page));
610
611         CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
612                inode, page, from, to, page->index);
613
614         llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
615         if (IS_ERR(llap))
616                 RETURN(PTR_ERR(llap));
617
618         exp = ll_i2obdexp(inode);
619         if (exp == NULL)
620                 RETURN(-EINVAL);
621
622         /* queue a write for some time in the future the first time we
623          * dirty the page */
624         if (!PageDirty(page)) {
625                 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
626                                      LPROC_LL_DIRTY_MISSES);
627
628                 rc = queue_or_sync_write(exp, inode, llap, to, 0);
629                 if (rc)
630                         GOTO(out, rc);
631         } else {
632                 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
633                                      LPROC_LL_DIRTY_HITS);
634         }
635
636         /* put the page in the page cache, from now on ll_removepage is
637          * responsible for cleaning up the llap.
638          * only set the page dirty when it's queued to be written out */
639         if (llap->llap_write_queued)
640                 set_page_dirty(page);
641
642 out:
643         size = (((obd_off)page->index) << PAGE_SHIFT) + to;
644         if (rc == 0) {
645                 spin_lock(&lli->lli_lock);
646                 obd_increase_kms(exp, lsm, size);
647                 spin_unlock(&lli->lli_lock);
648                 if (size > inode->i_size)
649                         inode->i_size = size;
650                 SetPageUptodate(page);
651         } else if (size > inode->i_size) {
652                 /* this page beyond the pales of i_size, so it can't be
653                  * truncated in ll_p_r_e during lock revoking. we must
654                  * teardown our book-keeping here. */
655                 ll_removepage(page);
656         }
657         RETURN(rc);
658 }
659
660 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
661 {
662         struct ll_ra_info *ra = &sbi->ll_ra_info;
663         unsigned long ret;
664         ENTRY;
665
666         spin_lock(&sbi->ll_lock);
667         ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
668         ra->ra_cur_pages += ret;
669         spin_unlock(&sbi->ll_lock);
670
671         RETURN(ret);
672 }
673
674 static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
675 {
676         struct ll_ra_info *ra = &sbi->ll_ra_info;
677         spin_lock(&sbi->ll_lock);
678         LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
679                  ra->ra_cur_pages, len);
680         ra->ra_cur_pages -= len;
681         spin_unlock(&sbi->ll_lock);
682 }
683
684 /* called for each page in a completed rpc.*/
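/*
 * On success a read marks the page uptodate (unless the read-ahead path
 * deferred that to ll_readpage) and a write clears llap_write_queued; on
 * failure a write is re-dirtied and PageError is set.  Either way the page
 * lock, any writeback state and our extra page reference are dropped.
 */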
685 void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
686 {
687         struct ll_async_page *llap;
688         struct page *page;
689         ENTRY;
690
691         llap = llap_from_cookie(data);
692         if (IS_ERR(llap)) {
693                 EXIT;
694                 return;
695         }
696
697         page = llap->llap_page;
698         LASSERT(PageLocked(page));
699
700         LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
701
702         if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
703                 ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
704
705         if (rc == 0)  {
706                 if (cmd == OBD_BRW_READ) {
707                         if (!llap->llap_defer_uptodate)
708                                 SetPageUptodate(page);
709                 } else {
710                         llap->llap_write_queued = 0;
711                 }
712                 ClearPageError(page);
713         } else {
714                 if (cmd == OBD_BRW_READ) {
715                         llap->llap_defer_uptodate = 0;
716                 } else {
717                         SetPageDirty(page);
718                         ClearPageLaunder(page);
719                 }
720                 SetPageError(page);
721         }
722
723         unlock_page(page);
724
725         if (0 && cmd == OBD_BRW_WRITE) {
726                 llap_write_complete(page->mapping->host, llap);
727                 ll_try_done_writing(page->mapping->host);
728         }
729
730         if (PageWriteback(page)) {
731                 end_page_writeback(page);
732         }
733         page_cache_release(page);
734         EXIT;
735 }
736
737 /* the kernel calls us here when a page is unhashed from the page cache.
738  * the page will be locked and the kernel is holding a spinlock, so
739  * we need to be careful.  we're just tearing down our book-keeping
740  * here. */
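/*
 * Teardown order below: detach the cookie from the OSC, clear page->private
 * under cover of the page lock, unlink the llap from the per-sb LRU and
 * finally free the slab object.
 */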
741 void ll_removepage(struct page *page)
742 {
743         struct inode *inode = page->mapping->host;
744         struct obd_export *exp;
745         struct ll_async_page *llap;
746         struct ll_sb_info *sbi = ll_i2sbi(inode);
747         int rc;
748         ENTRY;
749
750         LASSERT(!in_interrupt());
751
752         /* sync pages or failed read pages can leave pages in the page
753          * cache that don't have our data associated with them anymore */
754         if (page->private == 0) {
755                 EXIT;
756                 return;
757         }
758
759         LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
760
761         exp = ll_i2obdexp(inode);
762         if (exp == NULL) {
763                 CERROR("page %p ind %lu gave null export\n", page, page->index);
764                 EXIT;
765                 return;
766         }
767
768         llap = llap_from_page(page, 0);
769         if (IS_ERR(llap)) {
770                 CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
771                        page->index, PTR_ERR(llap));
772                 EXIT;
773                 return;
774         }
775
776         //llap_write_complete(inode, llap);
777         rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
778                                      llap->llap_cookie);
779         if (rc != 0)
780                 CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
781
782         /* this unconditional free is only safe because the page lock
783          * is providing exclusivity to memory pressure/truncate/writeback..*/
784         __clear_page_ll_data(page);
785
786         spin_lock(&sbi->ll_lock);
787         if (!list_empty(&llap->llap_pglist_item))
788                 list_del_init(&llap->llap_pglist_item);
789         sbi->ll_pglist_gen++;
790         sbi->ll_async_page_count--;
791         spin_unlock(&sbi->ll_lock);
792         OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
793         EXIT;
794 }
795
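/*
 * Non-blocking ldlm test: build the [start, start + PAGE_CACHE_SIZE - 1]
 * extent for @page and ask obd_match() (LDLM_FL_TEST_LOCK) whether a granted
 * PR or PW lock already covers it.  Returns > 0 on a match, 0 when the page
 * is not covered, and < 0 on error.
 */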
796 static int ll_page_matches(struct page *page)
797 {
798         struct lustre_handle match_lockh = {0};
799         struct inode *inode = page->mapping->host;
800         ldlm_policy_data_t page_extent;
801         int flags, matches;
802         ENTRY;
803
804         page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
805         page_extent.l_extent.end =
806                 page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
807         flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
808         matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
809                             ll_i2info(inode)->lli_smd, LDLM_EXTENT,
810                             &page_extent, LCK_PR | LCK_PW, &flags, inode,
811                             &match_lockh);
812         RETURN(matches);
813 }
814
815 static int ll_issue_page_read(struct obd_export *exp,
816                               struct ll_async_page *llap,
817                               struct obd_io_group *oig, int defer)
818 {
819         struct page *page = llap->llap_page;
820         int rc;
821
822         page_cache_get(page);
823         llap->llap_defer_uptodate = defer;
824         llap->llap_ra_used = 0;
825         rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
826                                 NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
827                                 PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY
828                                               | ASYNC_URGENT);
829         if (rc) {
830                 LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
831                 page_cache_release(page);
832         }
833         RETURN(rc);
834 }
835
836 static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
837 {
838         LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
839         ra->ra_stats[which]++;
840 }
841
842 static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
843 {
844         struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
845         struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
846
847         spin_lock(&sbi->ll_lock);
848         ll_ra_stats_inc_unlocked(ra, which);
849         spin_unlock(&sbi->ll_lock);
850 }
851
852 void ll_ra_accounting(struct page *page, struct address_space *mapping)
853 {
854         struct ll_async_page *llap;
855
856         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
857         if (IS_ERR(llap))
858                 return;
859
860         if (!llap->llap_defer_uptodate || llap->llap_ra_used)
861                 return;
862
863         ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
864 }
865
866 #define RAS_CDEBUG(ras) \
867         CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
868                ras->ras_last_readpage, ras->ras_consecutive,            \
869                ras->ras_window_start, ras->ras_window_len,              \
870                ras->ras_next_readahead);
871
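/*
 * index_in_window() below clamps on unsigned wrap-around: e.g. with
 * point == 3 and before == 8 the start would wrap, so it is clamped to 0
 * and indexes 0 .. point + after count as being inside the window.
 */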
872 static int index_in_window(unsigned long index, unsigned long point,
873                            unsigned long before, unsigned long after)
874 {
875         unsigned long start = point - before, end = point + after;
876
877         if (start > point)
878                start = 0;
879         if (end < point)
880                end = ~0;
881
882         return start <= index && index <= end;
883 }
884
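/*
 * Issue up to one window's worth of read-ahead: reserve pages from the
 * per-sb ra budget, walk [start, end] with grab_cache_page_nowait_gfp(),
 * skip pages that can't be grabbed, are already uptodate or already
 * deferred, stop at the first page no longer covered by a dlm lock, and
 * queue the rest as deferred-uptodate group reads.  If the walk stops
 * early, ras_next_readahead is wound back so the next call resumes where
 * this one left off.
 */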
885 static int ll_readahead(struct ll_readahead_state *ras,
886                          struct obd_export *exp, struct address_space *mapping,
887                          struct obd_io_group *oig, int flags)
888 {
889         unsigned long i, start = 0, end = 0, reserved;
890         struct ll_async_page *llap;
891         struct page *page;
892         int rc, ret = 0, match_failed = 0;
893         __u64 kms;
894         unsigned int gfp_mask;
895         ENTRY;
896
897         kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
898         if (kms == 0) {
899                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
900                 RETURN(0);
901         }
902
903         spin_lock(&ras->ras_lock);
904         /* reserve a part of the read-ahead window that we'll be issuing */
905         if (ras->ras_window_len) {
906                 start = ras->ras_next_readahead;
907                 end = ras->ras_window_start + ras->ras_window_len - 1;
908                 end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
909                 ras->ras_next_readahead = max(end, end + 1);
910
911                 RAS_CDEBUG(ras);
912         }
913         spin_unlock(&ras->ras_lock);
914
915         if (end == 0) {
916                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
917                 RETURN(0);
918         }
919
920         reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);
921         if (reserved < end - start + 1)
922                 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
923
924         gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
925 #ifdef __GFP_NOWARN
926         gfp_mask |= __GFP_NOWARN;
927 #endif
928
929         for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
930                 /* skip locked pages from previous readpage calls */
931                 page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
932                 if (page == NULL) {
933                         CDEBUG(D_READA, "g_c_p_n failed\n");
934                         continue;
935                 }
936
937                 /* we do this first so that we can see the page in the /proc
938                  * accounting */
939                 llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
940                 if (IS_ERR(llap) || llap->llap_defer_uptodate)
941                         goto next_page;
942
943                 /* skip completed pages */
944                 if (Page_Uptodate(page))
945                         goto next_page;
946
947                 /* bail when we hit the end of the lock. */
948                 if ((rc = ll_page_matches(page)) <= 0) {
949                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
950                                        "lock match failed: rc %d\n", rc);
951                         ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
952                         match_failed = 1;
953                         goto next_page;
954                 }
955
956                 rc = ll_issue_page_read(exp, llap, oig, 1);
957                 if (rc == 0) {
958                         reserved--;
959                         ret++;
960                         LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
961                                        "started read-ahead\n");
962                 }
963                 if (rc) {
964         next_page:
965                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
966                                        "skipping read-ahead\n");
967
968                         unlock_page(page);
969                 }
970                 page_cache_release(page);
971         }
972
973         LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
974         if (reserved != 0)
975                 ll_ra_count_put(ll_i2sbi(mapping->host), reserved);
976         if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
977                 ll_ra_stats_inc(mapping, RA_STAT_EOF);
978
979         /* if we didn't get to the end of the region we reserved from
980          * the ras we need to go back and update the ras so that the
981          * next read-ahead tries from where we left off.  we only do so
982          * if the region we failed to issue read-ahead on is still ahead
983          * of the app and behind the next index to start read-ahead from */
984         if (i != end + 1) {
985                 spin_lock(&ras->ras_lock);
986                 if (i < ras->ras_next_readahead &&
987                     index_in_window(i, ras->ras_window_start, 0,
988                                     ras->ras_window_len)) {
989                         ras->ras_next_readahead = i;
990                         RAS_CDEBUG(ras);
991                 }
992                 spin_unlock(&ras->ras_lock);
993         }
994
995         RETURN(ret);
996 }
997
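/*
 * ras_set_start() aligns the window start down to an RPC-sized boundary;
 * if PTLRPC_MAX_BRW_PAGES were 256 (1MB RPCs with 4KB pages), index 300
 * would give a window start of 256.
 */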
998 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
999 {
1000         ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
1001 }
1002
1003 /* called with the ras_lock held or from places where it doesn't matter */
1004 static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
1005 {
1006         ras->ras_last_readpage = index;
1007         ras->ras_consecutive = 1;
1008         ras->ras_window_len = 0;
1009         ras_set_start(ras, index);
1010         ras->ras_next_readahead = ras->ras_window_start;
1011
1012         RAS_CDEBUG(ras);
1013 }
1014
1015 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
1016 {
1017         spin_lock_init(&ras->ras_lock);
1018         ras_reset(ras, 0);
1019 }
1020
1021 static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
1022                        unsigned long index, unsigned hit)
1023 {
1024         struct ll_ra_info *ra = &sbi->ll_ra_info;
1025         int zero = 0;
1026         ENTRY;
1027
1028         spin_lock(&sbi->ll_lock);
1029         spin_lock(&ras->ras_lock);
1030
1031         ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
1032
1033         /* reset the read-ahead window in two cases.  First when the app seeks
1034          * or reads to some other part of the file.  Secondly if we get a
1035          * read-ahead miss that we think we've previously issued.  This can
1036          * be a symptom of there being so many read-ahead pages that the VM is
1037          * reclaiming it before we get to it. */
1038         if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
1039                 zero = 1;
1040                 ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
1041         } else if (!hit && ras->ras_window_len &&
1042                    index < ras->ras_next_readahead &&
1043                    index_in_window(index, ras->ras_window_start, 0,
1044                                    ras->ras_window_len)) {
1045                 zero = 1;
1046                 ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
1047         }
1048
1049         if (zero) {
1050                 ras_reset(ras, index);
1051                 GOTO(out_unlock, 0);
1052         }
1053
1054         ras->ras_last_readpage = index;
1055         ras->ras_consecutive++;
1056         ras_set_start(ras, index);
1057         ras->ras_next_readahead = max(ras->ras_window_start,
1058                                       ras->ras_next_readahead);
1059
1060         /* wait for a few pages to arrive before issuing readahead to avoid
1061          * the worst overutilization */
1062         if (ras->ras_consecutive == 3) {
1063                 ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
1064                 GOTO(out_unlock, 0);
1065         }
1066
1067         /* we need to increase the window sometimes.  we'll arbitrarily
1068          * do it half-way through the pages in an rpc */
1069         if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
1070             (PTLRPC_MAX_BRW_PAGES >> 1)) {
1071                 ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
1072                 ras->ras_window_len = min(ras->ras_window_len,
1073                                           ra->ra_max_pages);
1074         }
1075
1076         EXIT;
1077 out_unlock:
1078         RAS_CDEBUG(ras);
1079         spin_unlock(&ras->ras_lock);
1080         spin_unlock(&sbi->ll_lock);
1081         return;
1082 }
1083
1084 /*
1085  * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
1086  * read-ahead assumes it is valid to issue readpage all the way up to
1087  * i_size, but our dlm locks make that not the case.  We disable the
1088  * kernel's read-ahead and do our own by walking ahead in the page cache
1089  * checking for dlm lock coverage.  the main difference between 2.4 and
1090  * 2.6 is how read-ahead gets batched and issued, but we're using our own,
1091  * so they look the same.
1092  */
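/*
 * Flow sketch for ll_readpage(): a page the read-ahead already filled
 * (llap_defer_uptodate) is just marked uptodate and only triggers more
 * read-ahead; otherwise the page read is queued into a local io group,
 * read-ahead is piggy-backed onto the same group, and the group is
 * triggered before we return with the page still locked until completion.
 */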
1093 int ll_readpage(struct file *filp, struct page *page)
1094 {
1095         struct ll_file_data *fd = filp->private_data;
1096         struct inode *inode = page->mapping->host;
1097         struct obd_export *exp;
1098         struct ll_async_page *llap;
1099         struct obd_io_group *oig = NULL;
1100         int rc;
1101         ENTRY;
1102
1103         LASSERT(PageLocked(page));
1104         LASSERT(!PageUptodate(page));
1105         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
1106                inode->i_ino, inode->i_generation, inode,
1107                (((obd_off)page->index) << PAGE_SHIFT));
1108         LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
1109
1110         rc = oig_init(&oig);
1111         if (rc < 0)
1112                 GOTO(out, rc);
1113
1114         exp = ll_i2obdexp(inode);
1115         if (exp == NULL)
1116                 GOTO(out, rc = -EINVAL);
1117
1118         llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
1119         if (IS_ERR(llap))
1120                 GOTO(out, rc = PTR_ERR(llap));
1121
1122         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1123                 ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
1124                            llap->llap_defer_uptodate);
1125
1126         if (llap->llap_defer_uptodate) {
1127                 llap->llap_ra_used = 1;
1128                 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1129                                   fd->fd_flags);
1130                 if (rc > 0)
1131                         obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
1132                                              NULL, oig);
1133                 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
1134                 SetPageUptodate(page);
1135                 unlock_page(page);
1136                 GOTO(out_oig, rc = 0);
1137         }
1138
1139         rc = ll_page_matches(page);
1140         if (rc < 0) {
1141                 LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
1142                 GOTO(out, rc);
1143         }
1144
1145         if (rc == 0) {
1146 #if 0
1147                 CWARN("ino %lu page %lu (%llu) not covered by "
1148                       "a lock (mmap?).  check debug logs.\n",
1149                       inode->i_ino, page->index,
1150                       (long long)page->index << PAGE_CACHE_SHIFT);
1151 #endif
1152         }
1153
1154         rc = ll_issue_page_read(exp, llap, oig, 0);
1155         if (rc)
1156                 GOTO(out, rc);
1157
1158         LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
1159         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1160                 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1161                              fd->fd_flags);
1162
1163         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1164
1165 out:
1166         if (rc)
1167                 unlock_page(page);
1168 out_oig:
1169         if (oig != NULL)
1170                 oig_release(oig);
1171         RETURN(rc);
1172 }