lustre/llite/rw.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/lustre_mds.h>
#include <linux/lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev )
#endif
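
/* Annotation (not in the original source): this compat macro mirrors the
 * kernel's list_for_each_safe() but walks the list backwards; caching
 * pos->prev in n keeps the walk safe if pos is removed mid-iteration.
 * Kernels that already provide the macro use their own definition. */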

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct timeval start;
        struct brw_page pg;
        int rc;
        ENTRY;

        do_gettimeofday(&start);

        pg.pg = page;
        pg.disk_offset = pg.page_offset = ((obd_off)page->index) << PAGE_SHIFT;

        if (cmd == OBD_BRW_WRITE &&
            (pg.disk_offset + PAGE_SIZE > inode->i_size))
                pg.count = inode->i_size % PAGE_SIZE;
        else
                pg.count = PAGE_SIZE;
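
        /* Worked example (illustrative): with PAGE_SIZE == 4096 and
         * i_size == 10000, a write to the page covering EOF (index 2,
         * bytes 8192..12287) sends only i_size % PAGE_SIZE == 1808 bytes.
         * Note this yields the ZERO COUNT case below whenever i_size is
         * an exact multiple of PAGE_SIZE. */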

        CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
               cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
               pg.disk_offset, pg.disk_offset);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, inode->i_size,
                       page->mapping->host, page->mapping->host->i_size,
                       page->index, pg.disk_offset);
        }

        pg.flag = flags;

        if (cmd == OBD_BRW_WRITE)
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_WRITE, pg.count);
        else
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    LPROC_LL_BRW_READ, pg.count);
        rc = obd_brw(cmd, ll_i2dtexp(inode), oa, lsm, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        ll_stime_record(ll_i2sbi(inode), &start,
                        &ll_i2sbi(inode)->ll_brw_stime);
        RETURN(rc);
}

/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.
 * we grab the lock back in setattr_raw to avoid races. */
void ll_truncate(struct inode *inode)
{
        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
        struct obdo *oa = NULL;
        int rc;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
               inode->i_generation, inode);

        if (!lsm) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                EXIT;
                return;
        }

        oa = obdo_alloc();
        if (oa == NULL) {
                CERROR("cannot alloc oa, error %d\n",
                        -ENOMEM);
                EXIT;
                return;
        }

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);

        CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
               oa->o_id, inode->i_size);

        /* truncate == punch from new size to absolute end of file */
        /* NB: obd_punch must be called with i_sem held!  It updates the kms! */
        rc = obd_punch(ll_i2dtexp(inode), oa, lsm, inode->i_size,
                       OBD_OBJECT_EOF, NULL);
        if (rc)
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
        else
                obdo_to_inode(inode, oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                              OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);

        obdo_free(oa);
        EXIT;
        return;
} /* ll_truncate */

__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        struct obdo *oa = NULL;
        struct brw_page pga;
        __u64 kms;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.disk_offset = pga.page_offset = offset;
        pga.count = PAGE_SIZE;
        pga.flag = 0;

        oa = obdo_alloc();
        if (oa == NULL)
                RETURN(-ENOMEM);

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_mode = inode->i_mode;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLMODE |
                OBD_MD_FLTYPE | OBD_MD_FLGROUP;

        rc = obd_brw(OBD_BRW_CHECK, ll_i2dtexp(inode), oa, lsm,
                     1, &pga, NULL);
        if (rc)
                GOTO(out_free_oa, rc);

        if (PageUptodate(page))
                GOTO(out_free_oa, 0);

        /* We're completely overwriting an existing page, so _don't_ mark it
         * Uptodate until commit_write */
        if (from == 0 && to == PAGE_SIZE) {
                POISON_PAGE(page, 0x11);
                GOTO(out_free_oa, 0);
        }

        /* If we are writing to a new page, there is no need to read old
         * data.  The extent locking will have updated the KMS, and for our
         * purposes here we can treat it like i_size. */
        kms = lov_merge_size(lsm, 1);
        if (kms <= offset) {
                memset(kmap(page), 0, PAGE_SIZE);
                kunmap(page);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa->o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, oa, oa->o_valid);
        }

        EXIT;
prepare_done:
        if (rc == 0)
                SetPageUptodate(page);
out_free_oa:
        obdo_free(oa);
        return rc;
}
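
/* Summary of the paths above (annotation, not original): a full-page
 * overwrite skips the read entirely, a page wholly beyond the known
 * minimum size (KMS) is simply zero-filled, and only a partial write
 * over existing data pays for a synchronous read-modify-write via
 * ll_brw(). */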

struct ll_async_page *llap_from_cookie(void *cookie)
{
        struct ll_async_page *llap = cookie;
        if (llap->llap_magic != LLAP_MAGIC)
                return ERR_PTR(-EINVAL);
        return llap;
}

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(-EINVAL);

        page = llap->llap_page;

        LASSERT(cmd != OBD_BRW_READ);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * it's on.  XXX also, we're called with the cli lock held, so
         * if we took the page cache list lock we'd create a lock
         * inversion with the removepage path, which takes the page
         * lock and then the cli lock */
        clear_page_dirty(page);
        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = llap_from_cookie(data);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        page = llap->llap_page;
        lsm = ll_i2info(page->mapping->host)->lli_smd;
        kms = lov_merge_size(lsm, 1);

        /* catch race with truncate */
        if (((__u64)page->index << PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
                return kms % PAGE_SIZE;

        return PAGE_SIZE;
}
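
/* Worked example (illustrative): with PAGE_SIZE == 4096 and kms == 10000,
 * a queued page at index 2 covers bytes 8192..12287; 8192 < 10000 <= 12287,
 * so the RPC carries 10000 % 4096 == 1808 bytes.  A page at index 3 starts
 * at 12288 >= kms and is dropped (count 0), which is how a racing
 * truncate's punch is honoured. */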

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_valid valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_gr = lsm->lsm_object_gr;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd == OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
                mdc_pack_id(obdo_id(oa), inode->i_ino, 0, inode->i_mode,
                            id_group(&ll_i2info(inode)->lli_id),
                            id_fid(&ll_i2info(inode)->lli_id));

                oa->o_easize = ll_i2info(inode)->lli_io_epoch;
                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
        EXIT;
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_completion =        ll_ap_completion,
};
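
/* Annotation (not original): this ops table is the contract between llite
 * and the lower async-IO machinery: make_ready locks and cleans a page as
 * an RPC is assembled, refresh_count settles the final byte count against
 * the kms, fill_obdo stamps the object id and attributes, and completion
 * (ll_ap_completion, below) runs once per page when the RPC finishes. */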

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page->private;

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page->private, llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* XXX have the exp be an argument? */
struct ll_async_page *llap_from_page(struct page *page)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        llap = llap_cast_private(page);
        if (llap != NULL)
                RETURN(llap);

        exp = ll_i2dtexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        OBD_ALLOC(llap, sizeof(*llap));
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie);
        if (rc) {
                OBD_FREE(llap, sizeof(*llap));
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        list_add_tail(&llap->llap_proc_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

        RETURN(llap);
}
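
/* Lifecycle note (annotation): an llap is created here the first time a
 * page does Lustre IO, is found again via page->private through
 * llap_cast_private(), and lives until ll_removepage() tears it down when
 * the kernel unhashes the page; the sbi->ll_pglist list only feeds the
 * /proc accounting. */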

static int queue_or_sync_write(struct obd_export *exp,
                               struct lov_stripe_md *lsm,
                               struct ll_async_page *llap,
                               unsigned to,
                               obd_flags async_flags)
{
        struct obd_io_group *oig;
        int rc;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, lsm, NULL, llap->llap_cookie,
                                OBD_BRW_WRITE, 0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                //llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        rc = obd_queue_group_io(exp, lsm, NULL, oig, llap->llap_cookie,
                                OBD_BRW_WRITE, 0, to, 0, ASYNC_READY |
                                ASYNC_URGENT | ASYNC_COUNT_STABLE |
                                ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, lsm, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY)
                unlock_page(llap->llap_page);

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                       "sync write returned %d\n", rc);

        EXIT;
free_oig:
        oig_release(oig);
out:
        return rc;
}
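
/* Annotation (not original): the fast path above just queues the page for
 * a future write RPC; only if obd_queue_async_io() refuses it (e.g. the
 * cache cannot accept more dirty data -- an assumption about the failure
 * mode) does it fall back to building a one-page group IO, triggering it,
 * and waiting for completion synchronously. */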

void lov_increase_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
                      obd_off size);

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp = NULL;
        struct ll_async_page *llap;
        loff_t size;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_MISSES);

                exp = ll_i2dtexp(inode);
                if (exp == NULL)
                        RETURN(-EINVAL);

                rc = queue_or_sync_write(exp, ll_i2info(inode)->lli_smd, llap,
                                         to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
                                     LPROC_LL_DIRTY_HITS);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * don't dirty the page if it has been written out in q_o_s_w */
        if (llap->llap_write_queued)
                set_page_dirty(page);
        EXIT;
out:
        if (rc == 0) {
                size = (((obd_off)page->index) << PAGE_SHIFT) + to;
                lov_increase_kms(exp, lsm, size);
                if (size > inode->i_size)
                        inode->i_size = size;
                SetPageUptodate(page);
        }
        return rc;
}
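
/* Annotation (not original): commit_write extends the locally known size
 * eagerly: a write of bytes [from, to) into page index 5 with PAGE_SIZE
 * 4096 raises the kms and (if larger) i_size to 5*4096 + to, so later
 * reads and refresh_count see the new extent before any write RPC
 * completes. */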

int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(!PageDirty(page));
        LASSERT(PageLocked(page));

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, ll_i2info(inode)->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, ll_i2info(inode)->lli_smd, llap,
                                         PAGE_SIZE, ASYNC_READY |
                                         ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
        EXIT;
out:
        if (rc)
                unlock_page(page);
        return rc;
}

static unsigned long
ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}
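
/* Worked example (illustrative): with ra_max_pages == 1024 and 1000 pages
 * already in flight, ll_ra_count_get(sbi, 64) reserves only 24; the caller
 * must hand back whatever it does not issue via ll_ra_count_put(), as
 * ll_readahead() does below. */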

/* called for each page in a completed rpc.*/
void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = llap_from_cookie(data);
        if (IS_ERR(llap)) {
                EXIT;
                return;
        }

        page = llap->llap_page;
        LASSERT(PageLocked(page));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0)  {
                if (cmd == OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd == OBD_BRW_READ)
                        llap->llap_defer_uptodate = 0;
                SetPageError(page);
        }

        unlock_page(page);

        if (0 && cmd == OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);
        EXIT;
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page->private == 0) {
                EXIT;
                return;
        }

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        exp = ll_i2dtexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback..*/
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_proc_item))
                list_del_init(&llap->llap_proc_item);
        sbi->ll_pglist_gen++;
        spin_unlock(&sbi->ll_lock);
        OBD_FREE(llap, sizeof(*llap));
        EXIT;
}

static int ll_page_matches(struct page *page, int fd_flags)
{
        struct lustre_handle match_lockh = {0};
        struct inode *inode = page->mapping->host;
        ldlm_policy_data_t page_extent;
        int flags, matches;
        ENTRY;

        if (fd_flags & LL_FILE_GROUP_LOCKED)
                RETURN(1);

        page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
        page_extent.l_extent.end =
                page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
        flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
        matches = obd_match(ll_i2sbi(inode)->ll_dt_exp,
                            ll_i2info(inode)->lli_smd, LDLM_EXTENT,
                            &page_extent, LCK_PR | LCK_PW, &flags, inode,
                            &match_lockh);
        RETURN(matches);
}
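
/* Annotation (not original): this builds the [start, end] byte extent for
 * the page and asks the lock manager whether a granted PR or PW extent
 * lock already covers it; LDLM_FL_TEST_LOCK makes it a test-only match,
 * so no lock reference is taken.  Read-ahead below stops at the first
 * page this check refuses. */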

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY
                                              | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct page *page, struct address_space *mapping)
{
        struct ll_async_page *llap;

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                return;

        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
               ras->ras_last_readpage, ras->ras_consecutive,            \
               ras->ras_window_start, ras->ras_window_len,              \
               ras->ras_next_readahead);

static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
}
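
/* Worked example (illustrative): index_in_window() clamps unsigned
 * wraparound rather than letting it invert the interval.  With point == 5
 * and before == 8, point - before wraps past zero, so start is clamped to
 * 0; with point near ULONG_MAX, end is clamped to ~0.  ras_update() relies
 * on this when it asks for index_in_window(index, last_readpage, 8, 8). */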

static int ll_readahead(struct ll_readahead_state *ras,
                         struct obd_export *exp, struct address_space *mapping,
                         struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        ENTRY;

        kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }
        spin_lock(&ras->ras_lock);

        /* reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
                end = min(end, (unsigned long)(kms >> PAGE_CACHE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);

                RAS_CDEBUG(ras);
        }

        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait(mapping, i);
                if (page == NULL) {
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page);
                if (IS_ERR(llap) || llap->llap_defer_uptodate)
                        goto next_page;

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                if ((rc = ll_page_matches(page, flags)) <= 0) {
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "lock match failed: rc %d\n", rc);
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                        match_failed = 1;
                        goto next_page;
                }

                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(mapping->host), reserved);

        if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}

static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
}
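
/* Worked example (illustrative, assuming PTLRPC_MAX_BRW_PAGES is a power
 * of two, say 256): index & ~(256 - 1) rounds the window start down to an
 * RPC-sized boundary, so index 300 yields window_start 256.  Keeping the
 * window RPC-aligned lets read-ahead fill whole RPCs. */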

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive = 1;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = ras->ras_window_start;

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
}

static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
                       unsigned long index, unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First, when the app
         * seeks or reads to some other part of the file.  Second, if we
         * get a read-ahead miss on a page we think we've previously
         * issued.  This can be a symptom of there being so many read-ahead
         * pages that the VM is reclaiming them before we get to them. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* wait for a few pages to arrive before issuing readahead to avoid
         * the worst overutilization */
        if (ras->ras_consecutive == 3) {
                ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
                GOTO(out_unlock, 0);
        }

        /* we need to increase the window sometimes.  we'll arbitrarily
         * do it half-way through the pages in an rpc */
        if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
            (PTLRPC_MAX_BRW_PAGES >> 1)) {
                ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
                ras->ras_window_len = min(ras->ras_window_len,
                                          ra->ra_max_pages);
        }

        EXIT;
out_unlock:
        RAS_CDEBUG(ras);
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
}
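
/* Annotation (not original): the read-ahead state machine above, in brief:
 * a seek (or a miss inside the current window) resets everything; the
 * third consecutive readpage opens a window of one full RPC; thereafter
 * the window grows by PTLRPC_MAX_BRW_PAGES each time the reader crosses
 * the midpoint of an RPC's worth of pages, capped at ra_max_pages. */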

/*
 * for now we do our readpage the same on both 2.4 and 2.6.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = filp->private_data;
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
               inode->i_ino, inode->i_generation, inode,
               (((obd_off)page->index) << PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2dtexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        rc = ll_page_matches(page, fd->fd_flags);
        if (rc < 0) {
                LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
                GOTO(out, rc);
        }

        if (rc == 0) {
                CWARN("ino %lu page %lu (%llu) not covered by "
                      "a lock (mmap?).  check debug logs.\n",
                      inode->i_ino, page->index,
                      (long long)page->index << PAGE_CACHE_SHIFT);
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        if (ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        EXIT;
out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        return rc;
}