/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev )
#endif
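
/*
 * Compatibility fallback for kernels whose list.h lacks a reverse-safe
 * iterator: "n" caches pos->prev before the body runs, so the body may
 * safely delete "pos" from the list.  Hypothetical usage sketch (not a
 * caller in this file):
 *
 *      struct list_head *pos, *n;
 *      list_for_each_prev_safe(pos, n, &sbi->ll_pglist)
 *              list_del_init(pos);     // safe: n was saved before the del
 */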

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct timeval start;
        struct brw_page pg;
        int rc;
        ENTRY;

        do_gettimeofday(&start);

        pg.pg = page;
        pg.disk_offset = pg.page_offset = ((obd_off)page->index) << PAGE_SHIFT;

        if (cmd == OBD_BRW_WRITE &&
            (pg.disk_offset + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
        else
                pg.count = PAGE_SIZE;

        CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
               cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
               pg.disk_offset, pg.disk_offset);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, inode->i_size,
                       page->mapping->host, page->mapping->host->i_size,
                       page->index, pg.disk_offset);
        }

        pg.flag = flags;

        if (cmd == OBD_BRW_WRITE)
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_WRITE, pg.count);
        else
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_READ, pg.count);
        rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        ll_stime_record(ll_i2sbi(inode), &start,
                        &ll_i2sbi(inode)->ll_brw_stime);
        RETURN(rc);
}
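
/*
 * Worked example for the pg.count logic above (assuming 4096-byte
 * pages): writing the final page of a 6000-byte file computes
 * pg.count = 6000 % 4096 = 1904, so only the valid tail of the page
 * goes on the wire; reads and interior writes send the full 4096.
 * A write wholly beyond a page-aligned i_size computes a count of 0,
 * which is what triggers the ZERO COUNT message above.
 */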

/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate
 * we grab the lock back in setattr_raw to avoid races. */
void ll_truncate(struct inode *inode)
{
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
        struct obdo oa;
        int rc;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
               inode->i_generation, inode);

        if (!lsm) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                EXIT;
                return;
        }

        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                                    OBD_MD_FLATIME | OBD_MD_FLMTIME |
                                    OBD_MD_FLCTIME);

        CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
               oa.o_id, inode->i_size);

        /* truncate == punch from new size to absolute end of file */
        /* NB: obd_punch must be called with i_sem held!  It updates the kms! */
        rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
                       OBD_OBJECT_EOF, NULL);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                                          OBD_MD_FLATIME | OBD_MD_FLMTIME |
                                          OBD_MD_FLCTIME);

        EXIT;
        return;
} /* ll_truncate */
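
/*
 * Example of the punch semantics above: truncating a file to 5000
 * bytes issues one obd_punch() for the byte range
 * [5000, OBD_OBJECT_EOF); the lov layer below us fans that out to the
 * affected stripe objects, and the known minimum size (kms) is pulled
 * back to 5000 as a side effect, which is why i_sem must already be
 * held here.
 */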

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        struct brw_page pga;
        struct obdo oa;
        __u64 kms;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.disk_offset = pga.page_offset = offset;
        pga.count = PAGE_SIZE;
        pga.flag = 0;

        oa.o_id = lsm->lsm_object_id;
        oa.o_gr = lsm->lsm_object_gr;
        oa.o_mode = inode->i_mode;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                     OBD_MD_FLTYPE | OBD_MD_FLGROUP;

        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,
                     NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page))
                RETURN(0);

        /* We're completely overwriting an existing page, so _don't_ set it up
         * to date until commit_write */
        if (from == 0 && to == PAGE_SIZE) {
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.
         * The extent locking will have updated the KMS, and for our
         * purposes here we can treat it like i_size. */
        kms = lov_merge_size(lsm, 1);
        if (kms <= offset) {
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}
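
/*
 * ll_prepare_write() takes three fast paths before falling back to a
 * synchronous read-modify-write: an already-uptodate page returns
 * immediately; a full-page overwrite (from == 0, to == PAGE_SIZE)
 * skips the read since every byte will be rewritten; and a page wholly
 * beyond the known minimum size is simply zero-filled.  For example
 * (4K pages), a write into the page at file offset 8192 when kms is
 * 4096 hits the zero-fill path and never touches the wire.
 */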

struct ll_async_page *llap_from_cookie(void *cookie)
{
        struct ll_async_page *llap = cookie;
        if (llap->llap_magic != LLAP_MAGIC)
                return ERR_PTR(-EINVAL);
        return llap;
}

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(-EINVAL);

        page = llap->llap_page;

        LASSERT(cmd != OBD_BRW_READ);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        /* if we leave PageDirty set we might get another writepage call
         * in the future.  list walkers are bright enough to check the
         * dirty bit, so we can leave the page on whatever list it's on.
         * XXX also, we're called with the cli list lock held, so if we
         * took the page cache list lock we'd create a lock inversion with
         * the removepage path, which takes the page lock and then the
         * cli lock */
        clear_page_dirty(page);
        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        page = llap->llap_page;
        lsm = ll_i2info(page->mapping->host)->lli_smd;
        kms = lov_merge_size(lsm, 1);

        /* catch race with truncate */
        if (((__u64)page->index << PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
                return kms % PAGE_SIZE;

        return PAGE_SIZE;
}
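
/*
 * Worked example (4K pages, kms = 10000): a queued page at index 2
 * (file offset 8192) refreshes to 10000 % 4096 = 1808 bytes, the
 * sub-page tail; a page at index 3 (offset 12288 >= kms) refreshes to
 * 0 bytes and is dropped from the RPC, which is exactly the
 * truncate-race case described above.
 */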

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd == OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
                mdc_pack_id(obdo_id(oa), inode->i_ino, 0, inode->i_mode,
                            id_group(&ll_i2info(inode)->lli_id),
                            id_fid(&ll_i2info(inode)->lli_id));

                oa->o_easize = ll_i2info(inode)->lli_io_epoch;
                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
        EXIT;
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_completion =        ll_ap_completion,
};
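
/*
 * This ops vector is llite's half of the async I/O contract with the
 * obd layer: the page lock dance in ap_make_ready, the kms check in
 * ap_refresh_count, and the attribute packing in ap_fill_obdo are all
 * invoked from the RPC engine as it assembles and completes bulk I/O,
 * not from the original caller of ll_commit_write()/ll_writepage().
 */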

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page->private;

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page->private, llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* XXX have the exp be an argument? */
struct ll_async_page *llap_from_page(struct page *page)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        llap = llap_cast_private(page);
        if (llap != NULL)
                RETURN(llap);

        exp = ll_i2obdexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        OBD_ALLOC(llap, sizeof(*llap));
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_FREE(llap, sizeof(*llap));
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        list_add_tail(&llap->llap_proc_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

        RETURN(llap);
}
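
/*
 * llap lifecycle: llap_from_page() lazily attaches an ll_async_page to
 * page->private on first use and registers it with the obd layer via
 * obd_prep_async_page(); ll_removepage() below is the only place in
 * this file that tears it down again, under the page lock.  The
 * sbi->ll_pglist hook exists so the /proc accounting can enumerate
 * cached pages.
 */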

static int queue_or_sync_write(struct obd_export *exp,
                               struct lov_stripe_md *lsm,
                               struct ll_async_page *llap,
                               unsigned to,
                               obd_flag async_flags)
{
        struct obd_io_group *oig;
        int rc;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, lsm, NULL, llap->llap_cookie,
                                OBD_BRW_WRITE, 0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                //llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        rc = obd_queue_group_io(exp, lsm, NULL, oig, llap->llap_cookie,
                                OBD_BRW_WRITE, 0, to, 0, ASYNC_READY |
                                ASYNC_URGENT | ASYNC_COUNT_STABLE |
                                ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, lsm, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY)
                unlock_page(llap->llap_page);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n",
                       rc);

free_oig:
        oig_release(oig);
out:
        RETURN(rc);
}
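
/*
 * The two-step strategy above: first try to queue the page for a
 * future batched write RPC (cheap, returns immediately); only if the
 * client refuses (typically because no write grant or cache space is
 * available) fall back to a synchronous group write that is queued,
 * triggered, and waited on before returning to the writer.
 */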

void lov_increase_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
                      obd_off size);

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        /* get the export up front; the kms update at "out:" needs it in
         * the dirty-hit path as well */
        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_MISSES);

                rc = queue_or_sync_write(exp, ll_i2info(inode)->lli_smd, llap,
                                         to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_HITS);
        }

        /* mark the page dirty in the page cache; from now on ll_removepage
         * is responsible for cleaning up the llap.
         * don't re-dirty the page if it was already written out
         * synchronously by queue_or_sync_write() */
        if (llap->llap_write_queued)
                set_page_dirty(page);

out:
        if (rc == 0) {
                size = (((obd_off)page->index) << PAGE_SHIFT) + to;
                lov_increase_kms(exp, lsm, size);
                if (size > inode->i_size)
                        inode->i_size = size;
                SetPageUptodate(page);
        }
        RETURN(rc);
}

int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(!PageDirty(page));
        LASSERT(PageLocked(page));

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, ll_i2info(inode)->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, ll_i2info(inode)->lli_smd, llap,
                                         PAGE_SIZE, ASYNC_READY |
                                         ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
out:
        if (rc)
                unlock_page(page);
        RETURN(rc);
}
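
/*
 * ll_writepage() has two paths: if the page still has an async queue
 * entry it is simply flagged ASYNC_READY|ASYNC_URGENT so the next
 * write RPC picks it up; otherwise it falls back to
 * queue_or_sync_write() for a full-page write.  The page_cache_get()
 * taken here is dropped by ll_ap_completion() when the RPC finishes,
 * or immediately on error.
 */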

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}
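
/*
 * These two functions implement a simple credit pool capping the
 * number of read-ahead pages in flight per superblock.  For example,
 * with ra_max_pages = 256 and 250 pages already reading,
 * ll_ra_count_get(sbi, 32) grants only min(256 - 250, 32) = 6; the
 * caller issues what it can and returns any unused credits with
 * ll_ra_count_put().
 */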

/* called for each page in a completed rpc. */
void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        page = llap->llap_page;
        LASSERT(PageLocked(page));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0)  {
                if (cmd == OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd == OBD_BRW_READ)
                        llap->llap_defer_uptodate = 0;
                SetPageError(page);
        }

        unlock_page(page);

        if (0 && cmd == OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);
        EXIT;
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page->private == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2obdexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback. */
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_proc_item))
                list_del_init(&llap->llap_proc_item);
        sbi->ll_pglist_gen++;
        spin_unlock(&sbi->ll_lock);
        OBD_FREE(llap, sizeof(*llap));
        EXIT;
}

static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (fd_flags & LL_FILE_GROUP_LOCKED)
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
        flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
        matches = obd_match(ll_i2sbi(inode)->ll_lov_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY
                                              | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct page *page, struct address_space *mapping)
{
        struct ll_async_page *llap;

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                return;

        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
               ras->ras_last_readpage, ras->ras_consecutive,            \
               ras->ras_window_start, ras->ras_window_len,              \
               ras->ras_next_readahead);

static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
               start = 0;
        if (end < point)
               end = ~0;

        return start <= index && index <= end;
}
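
/*
 * index_in_window() clamps rather than wraps on unsigned overflow:
 * for point = 5, before = 8, after = 8, the raw start 5 - 8 wraps
 * past zero (so start > point), and is clamped to 0, making the
 * window [0, 13].  Likewise an "after" that would carry past ~0UL
 * pins end at ~0UL.
 */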

static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        ENTRY;

        kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }
        spin_lock(&ras->ras_lock);

        /* reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
                end = min(end, (unsigned long)(kms >> PAGE_CACHE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);

                RAS_CDEBUG(ras);
        }

        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait(mapping, i);
                if (page == NULL) {
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(mapping->host), reserved);

        if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}

static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
}
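
/*
 * ras_set_start() aligns the window to RPC-sized chunks.  For example,
 * if PTLRPC_MAX_BRW_PAGES is 64, a read at index 70 sets
 * ras_window_start = 70 & ~63 = 64, so read-ahead is issued in chunks
 * that map onto whole bulk RPCs.
 */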

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive = 1;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = ras->ras_window_start;

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
}

static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
                       unsigned long index, unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss on a page we think we've previously issued.  This
         * can be a symptom of there being so many read-ahead pages that the
         * VM is reclaiming them before we get to them. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* wait for a few pages to arrive before issuing readahead to avoid
         * the worst overutilization */
        if (ras->ras_consecutive == 3) {
                ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
                GOTO(out_unlock, 0);
        }

        /* we need to increase the window sometimes.  we'll arbitrarily
         * do it half-way through the pages in an rpc */
        if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
            (PTLRPC_MAX_BRW_PAGES >> 1)) {
                ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
                ras->ras_window_len = min(ras->ras_window_len,
                                          ra->ra_max_pages);
        }

        EXIT;
out_unlock:
        RAS_CDEBUG(ras);
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
        return;
}
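
/*
 * ras_update() in action (with PTLRPC_MAX_BRW_PAGES = 64, say): the
 * first two sequential reads only bump ras_consecutive; the third
 * opens a 64-page window.  From then on, each time the reader crosses
 * the half-RPC boundary (index & 63 == 32) the window grows by another
 * 64 pages, capped at ra_max_pages, while any distant seek or a miss
 * inside the issued window resets the state machine via ras_reset().
 */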

/*
 * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
               inode->i_ino, inode->i_generation, inode,
               (((obd_off)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        rc = ll_page_matches(page, fd->fd_flags);
        if (rc < 0) {
                LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
                GOTO(out, rc);
        }

        if (rc == 0) {
                CWARN("ino %lu page %lu (%llu) not covered by "
                      "a lock (mmap?).  check debug logs.\n",
                      inode->i_ino, page->index,
                      (long long)page->index << PAGE_CACHE_SHIFT);
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        RETURN(rc);
}