lustre/llite/rw.c  (fs/lustre-release.git, branch b1_4)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Lite I/O page cache routines shared by different kernel revs
5  *
6  *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #include <linux/config.h>
25 #include <linux/kernel.h>
26 #include <linux/mm.h>
27 #include <linux/string.h>
28 #include <linux/stat.h>
29 #include <linux/errno.h>
30 #include <linux/smp_lock.h>
31 #include <linux/unistd.h>
32 #include <linux/version.h>
33 #include <asm/system.h>
34 #include <asm/uaccess.h>
35
36 #include <linux/fs.h>
37 #include <linux/stat.h>
38 #include <asm/uaccess.h>
39 #include <asm/segment.h>
40 #include <linux/mm.h>
41 #include <linux/pagemap.h>
42 #include <linux/smp_lock.h>
43
44 #define DEBUG_SUBSYSTEM S_LLITE
45
46 #include <linux/lustre_mds.h>
47 #include <linux/lustre_lite.h>
48 #include "llite_internal.h"
49 #include <linux/lustre_compat25.h>
50
51 #ifndef list_for_each_prev_safe
52 #define list_for_each_prev_safe(pos, n, head) \
53         for (pos = (head)->prev, n = pos->prev; pos != (head); \
54                 pos = n, n = pos->prev )
55 #endif
56
57 kmem_cache_t *ll_async_page_slab = NULL;
58 size_t ll_async_page_slab_size = 0;
59
60 /* SYNCHRONOUS I/O to object storage for an inode */
61 static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
62                   struct page *page, int flags)
63 {
64         struct ll_inode_info *lli = ll_i2info(inode);
65         struct lov_stripe_md *lsm = lli->lli_smd;
66         struct brw_page pg;
67         int rc;
68         ENTRY;
69
70         pg.pg = page;
71         pg.off = ((obd_off)page->index) << PAGE_SHIFT;
72
73         if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
74                 pg.count = inode->i_size % PAGE_SIZE;
75         else
76                 pg.count = PAGE_SIZE;
77
78         LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
79                        cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
80                        inode->i_ino, pg.off, pg.off);
81         if (pg.count == 0) {
82                 CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
83                        LPU64"\n",
84                        inode->i_ino, inode, inode->i_size, page->mapping->host,
85                        page->mapping->host->i_size, page->index, pg.off);
86         }
87
88         pg.flag = flags;
89
90         if (cmd == OBD_BRW_WRITE)
91                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
92                                     LPROC_LL_BRW_WRITE, pg.count);
93         else
94                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
95                                     LPROC_LL_BRW_READ, pg.count);
96         rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);
97         if (rc == 0)
98                 obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
99         else if (rc != -EIO)
100                 CERROR("error from obd_brw: rc = %d\n", rc);
101         RETURN(rc);
102 }
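
/* A small stand-alone sketch of the write-count logic in ll_brw() above: the
 * page that straddles i_size only sends the bytes up to i_size, every other
 * page sends a full page.  The helper name and parameters are illustrative
 * only and are not part of this file. */
static unsigned long example_brw_write_count(unsigned long long off,
                                             unsigned long long i_size,
                                             unsigned long page_size)
{
        if (off + page_size > i_size)
                return (unsigned long)(i_size % page_size); /* partial page */
        return page_size;                                   /* full page */
}
/* e.g. a 10000-byte file with 4096-byte pages: offsets 0 and 4096 send 4096
 * bytes each, offset 8192 sends 10000 % 4096 == 1808 bytes.  If i_size is
 * page-aligned and the page sits at or past EOF the modulo is 0, which is
 * the "ZERO COUNT" case CERROR'd above. */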
103
104 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
105
106 /* This isn't where truncate starts; roughly:
107  * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.
108  * We grab the lock back in ll_setattr_raw to avoid races.
109  *
110  * Must be called with lli_size_sem held. */
111 void ll_truncate(struct inode *inode)
112 {
113         struct ll_inode_info *lli = ll_i2info(inode);
114         struct lov_stripe_md *lsm = lli->lli_smd;
115         struct obdo oa;
116         int rc;
117         ENTRY;
118         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %llu\n", inode->i_ino,
119                inode->i_generation, inode, inode->i_size);
120
121         if (lli->lli_size_pid != current->pid) {
122                 EXIT;
123                 return;
124         }
125
126         if (!lsm) {
127                 CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
128                        inode->i_ino);
129                 GOTO(out_unlock, 0);
130         }
131
132         LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);
133
134         if (lov_merge_size(lsm, 0) == inode->i_size) {
135                 CDEBUG(D_VFSTRACE, "skipping punch for "LPX64" (size = %llu)\n",
136                        lsm->lsm_object_id, inode->i_size);
137                 GOTO(out_unlock, 0);
138         }
139
140         CDEBUG(D_INFO, "calling punch for "LPX64" (new size %llu)\n",
141                lsm->lsm_object_id, inode->i_size);
142
143         oa.o_id = lsm->lsm_object_id;
144         oa.o_valid = OBD_MD_FLID;
145         obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
146                         OBD_MD_FLATIME |OBD_MD_FLMTIME |OBD_MD_FLCTIME);
147
148         obd_adjust_kms(ll_i2obdexp(inode), lsm, inode->i_size, 1);
149
150         lli->lli_size_pid = 0;
151         up(&lli->lli_size_sem);
152
153         rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
154                        OBD_OBJECT_EOF, NULL);
155         if (rc)
156                 CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
157         else
158                 obdo_to_inode(inode, &oa, OBD_MD_FLSIZE|OBD_MD_FLBLOCKS|
159                               OBD_MD_FLATIME | OBD_MD_FLMTIME |
160                               OBD_MD_FLCTIME);
161         EXIT;
162         return;
163
164  out_unlock:
165         lli->lli_size_pid = 0;
166         up(&lli->lli_size_sem);
167 } /* ll_truncate */
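
/* A minimal sketch (the names are illustrative, not from this file) of the
 * owner-pid guard that ll_truncate() relies on: ll_setattr_raw() takes
 * lli_size_sem and records its pid before calling vmtruncate(); when the VM
 * then calls ->truncate, only the recorded owner performs the punch and
 * releases the semaphore, and every other caller returns immediately.
 * Assumes only the standard kernel semaphore primitives. */
struct size_guard {
        struct semaphore sg_sem;
        pid_t            sg_pid;
};

static void size_guard_enter(struct size_guard *sg)
{
        down(&sg->sg_sem);                   /* as ll_setattr_raw() does */
        sg->sg_pid = current->pid;
}

static int size_guard_is_owner(struct size_guard *sg)
{
        return sg->sg_pid == current->pid;   /* the check at the top above */
}

static void size_guard_exit(struct size_guard *sg)
{
        sg->sg_pid = 0;                      /* like the out_unlock path */
        up(&sg->sg_sem);
}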
168
169 __u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
170 int ll_prepare_write(struct file *file, struct page *page, unsigned from,
171                      unsigned to)
172 {
173         struct inode *inode = page->mapping->host;
174         struct ll_inode_info *lli = ll_i2info(inode);
175         struct lov_stripe_md *lsm = lli->lli_smd;
176         obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
177         struct brw_page pga;
178         struct obdo oa;
179         __u64 kms;
180         int rc = 0;
181         ENTRY;
182
183         LASSERT(PageLocked(page));
184         (void)llap_cast_private(page); /* assertion */
185
186         /* Check to see if we should return -EIO right away */
187         pga.pg = page;
188         pga.off = offset;
189         pga.count = PAGE_SIZE;
190         pga.flag = 0;
191
192         oa.o_id = lsm->lsm_object_id;
193         oa.o_mode = inode->i_mode;
194         oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
195
196         rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,
197                      NULL);
198         if (rc)
199                 RETURN(rc);
200
201         if (PageUptodate(page)) {
202                 LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
203                 RETURN(0);
204         }
205
206         /* We're completely overwriting an existing page, so _don't_ set it up
207          * to date until commit_write */
208         if (from == 0 && to == PAGE_SIZE) {
209                 LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
210                 POISON_PAGE(page, 0x11);
211                 RETURN(0);
212         }
213
214         /* If we are writing to a new page, no need to read old data.  The extent
215          * locking will have updated the KMS, and for our purposes here we can
216          * treat it like i_size. */
217         down(&lli->lli_size_sem);
218         kms = lov_merge_size(lsm, 1);
219         up(&lli->lli_size_sem);
220         if (kms <= offset) {
221                 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
222                                kms, offset);
223                 memset(kmap(page), 0, PAGE_SIZE);
224                 kunmap(page);
225                 GOTO(prepare_done, rc = 0);
226         }
227
228         /* XXX could be an async ocp read.. read-ahead? */
229         rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
230         if (rc == 0) {
231                 /* bug 1598: don't clobber blksize */
232                 oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
233                 obdo_refresh_inode(inode, &oa, oa.o_valid);
234         }
235
236         EXIT;
237  prepare_done:
238         if (rc == 0)
239                 SetPageUptodate(page);
240
241         return rc;
242 }
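
/* A compact sketch of the three-way decision ll_prepare_write() makes for a
 * page that is not yet uptodate; the helper and enum are illustrative only.
 * A full-page overwrite needs nothing, a page at or beyond the known minimum
 * size (KMS) is simply zero-filled, and only a partial write inside the file
 * has to read the old contents first via ll_brw(OBD_BRW_READ, ...). */
enum prepare_action { PREPARE_NOTHING, PREPARE_ZERO_FILL, PREPARE_READ };

static enum prepare_action example_prepare_action(unsigned from, unsigned to,
                                                  unsigned long page_size,
                                                  unsigned long long offset,
                                                  unsigned long long kms)
{
        if (from == 0 && to == page_size)
                return PREPARE_NOTHING;         /* fully overwritten later */
        if (kms <= offset)
                return PREPARE_ZERO_FILL;       /* beyond any written data */
        return PREPARE_READ;                    /* partial write inside file */
}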
243
244 struct ll_async_page *llap_from_cookie(void *cookie)
245 {
246         struct ll_async_page *llap = cookie;
247         if (llap->llap_magic != LLAP_MAGIC)
248                 return ERR_PTR(-EINVAL);
249         return llap;
250 }
251
252 static int ll_ap_make_ready(void *data, int cmd)
253 {
254         struct ll_async_page *llap;
255         struct page *page;
256         ENTRY;
257
258         llap = llap_from_cookie(data);
259         if (IS_ERR(llap))
260                 RETURN(-EINVAL);
261
262         page = llap->llap_page;
263
264         LASSERT(cmd != OBD_BRW_READ);
265
266         /* we're trying to write, but the page is locked.. come back later */
267         if (TryLockPage(page))
268                 RETURN(-EAGAIN);
269
270         LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
271         page_cache_get(page);
272
273         /* If we left PageDirty set we might get another writepage call
274          * in the future.  List walkers are bright enough to check the
275          * page dirty bit, so we can leave the page on whatever list it's
276          * on.  XXX also, we're called with the cli lock held, so if we
277          * took the page-cache list lock we'd create a lock inversion
278          * with the removepage path, which takes the page lock and then
279          * the cli lock. */
280         clear_page_dirty(page);
281         RETURN(0);
282 }
283
284 /* We have two reasons for giving llite the opportunity to change the
285  * write length of a given queued page as it builds the RPC containing
286  * the page:
287  *
288  * 1) Further extending writes may have landed in the page cache
289  *    since a partial write first queued this page requiring us
290  *    to write more from the page cache.  (No further races are possible, since
291  *    by the time this is called, the page is locked.)
292  * 2) We might have raced with truncate and want to avoid performing
293  *    write RPCs that are just going to be thrown away by the
294  *    truncate's punch on the storage targets.
295  *
296  * The kms serves these purposes as it is set at both truncate and extending
297  * writes.
298  */
299 static int ll_ap_refresh_count(void *data, int cmd)
300 {
301         struct ll_inode_info *lli;
302         struct ll_async_page *llap;
303         struct lov_stripe_md *lsm;
304         struct page *page;
305         __u64 kms;
306         ENTRY;
307
308         /* readpage queues with _COUNT_STABLE, shouldn't get here. */
309         LASSERT(cmd != OBD_BRW_READ);
310
311         llap = llap_from_cookie(data);
312         if (IS_ERR(llap))
313                 RETURN(PTR_ERR(llap));
314
315         page = llap->llap_page;
316         lli = ll_i2info(page->mapping->host);
317         lsm = lli->lli_smd;
318
319         //down(&lli->lli_size_sem);
320         kms = lov_merge_size(lsm, 1);
321         //up(&lli->lli_size_sem);
322
323         /* catch race with truncate */
324         if (((__u64)page->index << PAGE_SHIFT) >= kms)
325                 return 0;
326
327         /* catch sub-page write at end of file */
328         if (((__u64)page->index << PAGE_SHIFT) + PAGE_SIZE > kms)
329                 return kms % PAGE_SIZE;
330
331         return PAGE_SIZE;
332 }
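
/* A worked restatement of the count refresh above, with illustrative names.
 * Suppose 4096-byte pages and a KMS that a racing truncate has just pulled
 * back to 6000 bytes: page 0 still writes 4096 bytes, page 1 straddles the
 * new end and writes 6000 % 4096 == 1904 bytes, and page 2 is entirely past
 * the KMS so nothing is written for it. */
static unsigned long example_refresh_count(unsigned long index,
                                           unsigned long long kms,
                                           unsigned long page_size)
{
        unsigned long long off = (unsigned long long)index * page_size;

        if (off >= kms)
                return 0;                       /* raced with truncate */
        if (off + page_size > kms)
                return (unsigned long)(kms % page_size); /* sub-page at EOF */
        return page_size;                       /* full page */
}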
333
334 void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
335 {
336         struct lov_stripe_md *lsm;
337         obd_flag valid_flags;
338
339         lsm = ll_i2info(inode)->lli_smd;
340
341         oa->o_id = lsm->lsm_object_id;
342         oa->o_valid = OBD_MD_FLID;
343         valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
344         if (cmd == OBD_BRW_WRITE) {
345                 oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
346                 mdc_pack_fid(obdo_fid(oa), inode->i_ino, 0, inode->i_mode);
347                 oa->o_easize = ll_i2info(inode)->lli_io_epoch;
348
349                 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
350                                OBD_MD_FLUID | OBD_MD_FLGID;
351         }
352
353         obdo_from_inode(oa, inode, valid_flags);
354 }
355
356 static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
357 {
358         struct ll_async_page *llap;
359         ENTRY;
360
361         llap = llap_from_cookie(data);
362         if (IS_ERR(llap)) {
363                 EXIT;
364                 return;
365         }
366
367         ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
368         EXIT;
369 }
370
371 static void ll_ap_get_ucred(void *data, struct obd_ucred *ouc)
372 {
373         struct ll_async_page *llap;
374
375         llap = llap_from_cookie(data);
376         if (IS_ERR(llap)) {
377                 EXIT;
378                 return;
379         }
380
381         memcpy(ouc, &llap->llap_ouc, sizeof(*ouc));
382         EXIT;
383 }
384
385 static struct obd_async_page_ops ll_async_page_ops = {
386         .ap_make_ready =        ll_ap_make_ready,
387         .ap_refresh_count =     ll_ap_refresh_count,
388         .ap_fill_obdo =         ll_ap_fill_obdo,
389         .ap_completion =        ll_ap_completion,
390         .ap_get_ucred =         ll_ap_get_ucred,
391 };
392
393 struct ll_async_page *llap_cast_private(struct page *page)
394 {
395         struct ll_async_page *llap = (struct ll_async_page *)page->private;
396
397         LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
398                  "page %p private %lu gave magic %d which != %d\n",
399                  page, page->private, llap->llap_magic, LLAP_MAGIC);
400
401         return llap;
402 }
403
404 /* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
405  *
406  * There is an llap attached onto every page in lustre, linked off @sbi.
407  * We add an llap to the list so we don't lose our place during list walking.
408  * If llaps in the list are being moved they will only move to the end
409  * of the LRU, and we aren't terribly interested in those pages here (we
410  * start at the beginning of the list where the least-used llaps are).
411  */
412 int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
413 {
414         struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
415         unsigned long total, want, count = 0;
416
417         total = sbi->ll_async_page_count;
418
419         /* There can be a large number of llaps (600k or more in a large
420          * memory machine) so the VM 1/6 shrink ratio is likely too much.
421          * Since we are freeing pages also, we don't necessarily want to
422          * shrink so much.  Limit to 40MB of pages + llaps per call. */
423         if (shrink_fraction == 0)
424                 want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
425         else
426                 want = (total + shrink_fraction - 1) / shrink_fraction;
427
428         if (want > 40 << (20 - PAGE_CACHE_SHIFT))
429                 want = 40 << (20 - PAGE_CACHE_SHIFT);
430
431         CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
432                want, total, shrink_fraction);
433
434         spin_lock(&sbi->ll_lock);
435         list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);
436
437         while (--total >= 0 && count < want) {
438                 struct page *page;
439
440                 if (unlikely(need_resched())) {
441                         spin_unlock(&sbi->ll_lock);
442                         cond_resched();
443                         spin_lock(&sbi->ll_lock);
444                 }
445
446                 llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
447                 list_del_init(&dummy_llap.llap_pglist_item);
448                 if (llap == NULL)
449                         break;
450
451                 page = llap->llap_page;
452                 LASSERT(page != NULL);
453
454                 list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);
455
456                 /* Page needs/undergoing IO */
457                 if (TryLockPage(page)) {
458                         LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
459                         continue;
460                 }
461
462                 /* If page is dirty or undergoing IO don't discard it */
463                 if (llap->llap_write_queued || PageDirty(page) ||
464                     (!PageUptodate(page) &&
465                      llap->llap_origin != LLAP_ORIGIN_READAHEAD)) {
466                         unlock_page(page);
467                         LL_CDEBUG_PAGE(D_PAGE, page, "can't drop from cache: "
468                                        "%s%s%s%s origin %s\n",
469                                        llap->llap_write_queued ? "wq " : "",
470                                        PageDirty(page) ? "pd " : "",
471                                        PageUptodate(page) ? "" : "!pu ",
472                                        llap->llap_defer_uptodate ? "" : "!du",
473                                        llap_origins[llap->llap_origin]);
474                         continue;
475                 }
476
477                 page_cache_get(page);
478                 spin_unlock(&sbi->ll_lock);
479
480                 ++count;
481                 LL_CDEBUG_PAGE(D_PAGE, page, "drop from cache %lu/%lu\n",
482                                count, want);
483                 if (page->mapping != NULL) {
484                         ll_ra_accounting(page, page->mapping);
485                         ll_truncate_complete_page(page);
486                 }
487                 unlock_page(page);
488                 page_cache_release(page);
489
490                 spin_lock(&sbi->ll_lock);
491         }
492         list_del(&dummy_llap.llap_pglist_item);
493         spin_unlock(&sbi->ll_lock);
494
495         CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
496                count, want, total);
497
498         return count;
499 }
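
/* A stripped-down sketch of the dummy-anchor walk used above.  Keeping a
 * dummy element on the LRU lets us drop the spinlock while working on the
 * current item and still know where to resume; anything that moves in the
 * meantime only moves to the tail, which we were not going to scan anyway.
 * struct walk_item, its callback and example_anchor_walk() are illustrative
 * names; the list primitives are the <linux/list.h> ones already used here.
 * The callback must re-take the lock before unlinking anything, the way
 * ll_removepage() takes sbi->ll_lock, so the anchor's neighbours stay valid. */
struct walk_item {
        struct list_head wi_list;
        /* ... payload ... */
};

static void example_anchor_walk(struct list_head *head, spinlock_t *lock,
                                void (*process_item)(struct walk_item *))
{
        struct list_head anchor;
        struct walk_item *item;

        spin_lock(lock);
        list_add(&anchor, head);                   /* start at the list head */
        while (anchor.next != head) {
                item = list_entry(anchor.next, struct walk_item, wi_list);
                list_del_init(&anchor);
                list_add(&anchor, &item->wi_list); /* keep our place */
                spin_unlock(lock);
                process_item(item);                /* lock dropped here */
                spin_lock(lock);
        }
        list_del(&anchor);
        spin_unlock(lock);
}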
500
501 struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
502 {
503         struct ll_async_page *llap;
504         struct obd_export *exp;
505         struct inode *inode = page->mapping->host;
506         struct ll_sb_info *sbi = ll_i2sbi(inode);
507         int rc;
508         ENTRY;
509
510         LASSERT(ll_async_page_slab);
511         LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);
512
513         llap = llap_cast_private(page);
514         if (llap != NULL) {
515                 /* move to end of LRU list */
516                 spin_lock(&sbi->ll_lock);
517                 sbi->ll_pglist_gen++;
518                 list_del_init(&llap->llap_pglist_item);
519                 list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
520                 spin_unlock(&sbi->ll_lock);
521                 GOTO(out, llap);
522         }
523
524         exp = ll_i2obdexp(page->mapping->host);
525         if (exp == NULL)
526                 RETURN(ERR_PTR(-EINVAL));
527
528         /* limit the number of lustre-cached pages */
529         if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
530                 llap_shrink_cache(sbi, 0);
531
532         OBD_SLAB_ALLOC(llap, ll_async_page_slab, SLAB_KERNEL,
533                        ll_async_page_slab_size);
534         if (llap == NULL)
535                 RETURN(ERR_PTR(-ENOMEM));
536         llap->llap_magic = LLAP_MAGIC;
537         llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));
538
539         rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
540                                  (obd_off)page->index << PAGE_SHIFT,
541                                  &ll_async_page_ops, llap, &llap->llap_cookie);
542         if (rc) {
543                 OBD_SLAB_FREE(llap, ll_async_page_slab,
544                               ll_async_page_slab_size);
545                 RETURN(ERR_PTR(rc));
546         }
547
548         CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
549                page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
550         /* also zeroing the PRIVBITS low order bitflags */
551         __set_page_ll_data(page, llap);
552         llap->llap_page = page;
553
554         spin_lock(&sbi->ll_lock);
555         sbi->ll_pglist_gen++;
556         sbi->ll_async_page_count++;
557         list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
558         spin_unlock(&sbi->ll_lock);
559
560 out:
561         llap->llap_origin = origin;
562         RETURN(llap);
563 }
564
565 static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
566                                struct ll_async_page *llap,
567                                unsigned to, obd_flag async_flags)
568 {
569         unsigned long size_index = inode->i_size >> PAGE_SHIFT;
570         struct obd_io_group *oig;
571         int rc;
572         ENTRY;
573
574         /* _make_ready only sees llap once we've unlocked the page */
575         llap->llap_write_queued = 1;
576         rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
577                                 llap->llap_cookie, OBD_BRW_WRITE, 0, 0, 0,
578                                 async_flags);
579         if (rc == 0) {
580                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
581                 //llap_write_pending(inode, llap);
582                 GOTO(out, 0);
583         }
584
585         llap->llap_write_queued = 0;
586
587         rc = oig_init(&oig);
588         if (rc)
589                 GOTO(out, rc);
590
591         /* make full-page requests if we are not at EOF (bug 4410) */
592         if (to != PAGE_SIZE && llap->llap_page->index < size_index) {
593                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
594                                "sync write before EOF: size_index %lu, to %d\n",
595                                size_index, to);
596                 to = PAGE_SIZE;
597         } else if (to != PAGE_SIZE && llap->llap_page->index == size_index) {
598                 int size_to = inode->i_size & ~PAGE_MASK;
599                 LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
600                                "sync write at EOF: size_index %lu, to %d/%d\n",
601                                size_index, to, size_to);
602                 if (to < size_to)
603                         to = size_to;
604         }
605
606         rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
607                                 llap->llap_cookie, OBD_BRW_WRITE, 0, to, 0,
608                                 ASYNC_READY | ASYNC_URGENT |
609                                 ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
610         if (rc)
611                 GOTO(free_oig, rc);
612
613         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
614         if (rc)
615                 GOTO(free_oig, rc);
616
617         rc = oig_wait(oig);
618
619         if (!rc && async_flags & ASYNC_READY)
620                 unlock_page(llap->llap_page);
621
622         LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);
623
624 free_oig:
625         oig_release(oig);
626 out:
627         RETURN(rc);
628 }
629
630 /* Update our write count to account for i_size increases that may have
631  * happened since we queued the page for IO. */
632
633 /* Be careful not to return success without setting the page Uptodate, or
634  * the next pass through prepare_write will read in stale data from disk. */
635 int ll_commit_write(struct file *file, struct page *page, unsigned from,
636                     unsigned to)
637 {
638         struct inode *inode = page->mapping->host;
639         struct ll_inode_info *lli = ll_i2info(inode);
640         struct lov_stripe_md *lsm = lli->lli_smd;
641         struct obd_export *exp;
642         struct ll_async_page *llap;
643         struct ll_uctxt ctxt;
644         loff_t size;
645         int rc = 0;
646         ENTRY;
647
648         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
649         LASSERT(inode == file->f_dentry->d_inode);
650         LASSERT(PageLocked(page));
651
652         CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
653                inode, page, from, to, page->index);
654
655         llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
656         if (IS_ERR(llap))
657                 RETURN(PTR_ERR(llap));
658
659         exp = ll_i2obdexp(inode);
660         if (exp == NULL)
661                 RETURN(-EINVAL);
662
663         /* set user credential information for this page */
664         llap->llap_ouc.ouc_fsuid = current->fsuid;
665         llap->llap_ouc.ouc_fsgid = current->fsgid;
666         llap->llap_ouc.ouc_cap = current->cap_effective;
667         ll_i2uctxt(&ctxt, inode, NULL);
668         llap->llap_ouc.ouc_suppgid1 = ctxt.gid1;
669
670         /* the first time we dirty the page, queue a write to be sent some
671          * time in the future */
672         if (!PageDirty(page)) {
673                 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
674                                      LPROC_LL_DIRTY_MISSES);
675
676                 rc = queue_or_sync_write(exp, inode, llap, to, 0);
677                 if (rc)
678                         GOTO(out, rc);
679         } else {
680                 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
681                                      LPROC_LL_DIRTY_HITS);
682         }
683
684         /* Put the page in the page cache; from now on ll_removepage is
685          * responsible for cleaning up the llap.  Only set the page dirty
686          * when it has been queued to be written out. */
687         if (llap->llap_write_queued)
688                 set_page_dirty(page);
689
690 out:
691         size = (((obd_off)page->index) << PAGE_SHIFT) + to;
692         down(&lli->lli_size_sem);
693         if (rc == 0) {
694                 obd_adjust_kms(exp, lsm, size, 0);
695                 if (size > inode->i_size)
696                         inode->i_size = size;
697                 SetPageUptodate(page);
698         } else if (size > inode->i_size) {
699                 /* this page is beyond the pale of i_size, so it can't be
700                  * truncated in ll_p_r_e during lock revoking.  we must
701                  * tear down our book-keeping here. */
702                 ll_removepage(page);
703         }
704         up(&lli->lli_size_sem);
705         RETURN(rc);
706 }
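
/* The candidate size computed at "out:" above is just the byte past the last
 * byte this commit wrote; both the KMS and, if it grew, i_size are pushed to
 * that value.  A tiny illustration (helper name and the 12-bit page shift are
 * assumptions for the example only): page index 2 with to == 100 gives
 * (2 << 12) + 100 == 8292. */
static unsigned long long example_commit_size(unsigned long index,
                                              unsigned to, unsigned page_shift)
{
        return ((unsigned long long)index << page_shift) + to;
}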
707
708 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
709 {
710         struct ll_ra_info *ra = &sbi->ll_ra_info;
711         unsigned long ret;
712         ENTRY;
713
714         spin_lock(&sbi->ll_lock);
715         ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
716         ra->ra_cur_pages += ret;
717         spin_unlock(&sbi->ll_lock);
718
719         RETURN(ret);
720 }
721
722 static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
723 {
724         struct ll_ra_info *ra = &sbi->ll_ra_info;
725         spin_lock(&sbi->ll_lock);
726         LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
727                  ra->ra_cur_pages, len);
728         ra->ra_cur_pages -= len;
729         spin_unlock(&sbi->ll_lock);
730 }
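
/* A small sketch of the read-ahead page accounting done by the two helpers
 * above: a reader asks for 'len' pages, receives at most what is still free
 * under the maximum, and later returns whatever it did not use, exactly as
 * ll_readahead() below does with its 'reserved' counter.  Locking is left
 * out here (the real helpers take sbi->ll_lock) and it is assumed, as the
 * real code guarantees, that *cur never exceeds max. */
static unsigned long example_ra_reserve(unsigned long *cur, unsigned long max,
                                        unsigned long len)
{
        unsigned long got = (max - *cur < len) ? max - *cur : len;

        *cur += got;
        return got;
}

static void example_ra_release(unsigned long *cur, unsigned long len)
{
        *cur -= len;            /* never more than was reserved */
}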
731
732 /* called for each page in a completed RPC. */
733 void ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
734 {
735         struct ll_async_page *llap;
736         struct page *page;
737         ENTRY;
738
739         llap = llap_from_cookie(data);
740         if (IS_ERR(llap)) {
741                 EXIT;
742                 return;
743         }
744
745         page = llap->llap_page;
746         LASSERT(PageLocked(page));
747
748         LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);
749
750         if (cmd == OBD_BRW_READ && llap->llap_defer_uptodate)
751                 ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);
752
753         if (rc == 0)  {
754                 if (cmd == OBD_BRW_READ) {
755                         if (!llap->llap_defer_uptodate)
756                                 SetPageUptodate(page);
757                 } else {
758                         llap->llap_write_queued = 0;
759                 }
760                 ClearPageError(page);
761         } else {
762                 if (cmd == OBD_BRW_READ) {
763                         llap->llap_defer_uptodate = 0;
764                 } else {
765                         ll_redirty_page(page);
766                 }
767                 SetPageError(page);
768         }
769
770         unlock_page(page);
771
772         if (0 && cmd == OBD_BRW_WRITE) {
773                 llap_write_complete(page->mapping->host, llap);
774                 ll_try_done_writing(page->mapping->host);
775         }
776
777         if (PageWriteback(page)) {
778                 end_page_writeback(page);
779         }
780         page_cache_release(page);
781         EXIT;
782 }
783
784 /* the kernel calls us here when a page is unhashed from the page cache.
785  * the page will be locked and the kernel is holding a spinlock, so
786  * we need to be careful.  we're just tearing down our book-keeping
787  * here. */
788 void ll_removepage(struct page *page)
789 {
790         struct inode *inode = page->mapping->host;
791         struct obd_export *exp;
792         struct ll_async_page *llap;
793         struct ll_sb_info *sbi = ll_i2sbi(inode);
794         int rc;
795         ENTRY;
796
797         LASSERT(!in_interrupt());
798
799         /* sync pages or failed read pages can leave pages in the page
800          * cache that don't have our data associated with them anymore */
801         if (page->private == 0) {
802                 EXIT;
803                 return;
804         }
805
806         LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
807
808         exp = ll_i2obdexp(inode);
809         if (exp == NULL) {
810                 CERROR("page %p ind %lu gave null export\n", page, page->index);
811                 EXIT;
812                 return;
813         }
814
815         llap = llap_from_page(page, 0);
816         if (IS_ERR(llap)) {
817                 CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
818                        page->index, PTR_ERR(llap));
819                 EXIT;
820                 return;
821         }
822
823         //llap_write_complete(inode, llap);
824         rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
825                                      llap->llap_cookie);
826         if (rc != 0)
827                 CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
828
829         /* this unconditional free is only safe because the page lock
830          * provides exclusion against memory pressure/truncate/writeback. */
831         __clear_page_ll_data(page);
832
833         spin_lock(&sbi->ll_lock);
834         if (!list_empty(&llap->llap_pglist_item))
835                 list_del_init(&llap->llap_pglist_item);
836         sbi->ll_pglist_gen++;
837         sbi->ll_async_page_count--;
838         spin_unlock(&sbi->ll_lock);
839         OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
840         EXIT;
841 }
842
843 static int ll_page_matches(struct page *page, int readahead)
844 {
845         struct lustre_handle match_lockh = {0};
846         struct inode *inode = page->mapping->host;
847         ldlm_policy_data_t page_extent;
848         int flags, matches;
849         ENTRY;
850
851         page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
852         page_extent.l_extent.end =
853                 page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
854         flags = LDLM_FL_TEST_LOCK;
855         if (!readahead)
856                 flags |= LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED;
857         matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
858                             ll_i2info(inode)->lli_smd, LDLM_EXTENT,
859                             &page_extent, LCK_PR | LCK_PW, &flags, inode,
860                             &match_lockh);
861         RETURN(matches);
862 }
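
/* ll_page_matches() asks the OSC whether an extent lock already covers the
 * page's byte range; the range itself is just the arithmetic below.  For
 * example, page index 3 with 4096-byte pages maps to the inclusive extent
 * [12288, 16383].  The struct and helper names are illustrative only. */
struct byte_extent { unsigned long long start, end; };

static struct byte_extent example_page_extent(unsigned long index,
                                              unsigned long page_size)
{
        struct byte_extent ext;

        ext.start = (unsigned long long)index * page_size;
        ext.end = ext.start + page_size - 1;    /* inclusive, as above */
        return ext;
}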
863
864 static int ll_issue_page_read(struct obd_export *exp,
865                               struct ll_async_page *llap,
866                               struct obd_io_group *oig, int defer)
867 {
868         struct page *page = llap->llap_page;
869         int rc;
870
871         page_cache_get(page);
872         llap->llap_defer_uptodate = defer;
873         llap->llap_ra_used = 0;
874         rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
875                                 NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
876                                 PAGE_SIZE, 0, ASYNC_COUNT_STABLE | ASYNC_READY
877                                               | ASYNC_URGENT);
878         if (rc) {
879                 LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
880                 page_cache_release(page);
881         }
882         RETURN(rc);
883 }
884
885 static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
886 {
887         LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
888         ra->ra_stats[which]++;
889 }
890
891 static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
892 {
893         struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
894         struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;
895
896         spin_lock(&sbi->ll_lock);
897         ll_ra_stats_inc_unlocked(ra, which);
898         spin_unlock(&sbi->ll_lock);
899 }
900
901 void ll_ra_accounting(struct page *page, struct address_space *mapping)
902 {
903         struct ll_async_page *llap;
904
905         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
906         if (IS_ERR(llap))
907                 return;
908
909         if (!llap->llap_defer_uptodate || llap->llap_ra_used)
910                 return;
911
912         ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
913 }
914
915 #define RAS_CDEBUG(ras) \
916         CDEBUG(D_READA, "lrp %lu c %lu ws %lu wl %lu nra %lu\n",        \
917                ras->ras_last_readpage, ras->ras_consecutive,            \
918                ras->ras_window_start, ras->ras_window_len,              \
919                ras->ras_next_readahead);
920
921 static int index_in_window(unsigned long index, unsigned long point,
922                            unsigned long before, unsigned long after)
923 {
924         unsigned long start = point - before, end = point + after;
925
926         if (start > point)
927                start = 0;
928         if (end < point)
929                end = ~0;
930
931         return start <= index && index <= end;
932 }
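
/* A short usage example for index_in_window() above.  The unsigned clamping
 * means a window of "8 before, 8 after" around point 3 effectively spans
 * [0, 11], so index 1 is inside and index 20 is not.  Purely illustrative,
 * never called. */
static void example_index_in_window_usage(void)
{
        int inside  = index_in_window(1, 3, 8, 8);      /* 1: within [0, 11] */
        int outside = index_in_window(20, 3, 8, 8);     /* 0: past point + 8 */

        (void)inside;
        (void)outside;
}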
933
934 static int ll_readahead(struct ll_readahead_state *ras,
935                          struct obd_export *exp, struct address_space *mapping,
936                          struct obd_io_group *oig, int flags)
937 {
938         unsigned long i, start = 0, end = 0, reserved;
939         struct ll_async_page *llap;
940         struct page *page;
941         int rc, ret = 0, match_failed = 0;
942         __u64 kms;
943         unsigned int gfp_mask;
944         ENTRY;
945
946         kms = lov_merge_size(ll_i2info(mapping->host)->lli_smd, 1);
947         if (kms == 0) {
948                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
949                 RETURN(0);
950         }
951
952         spin_lock(&ras->ras_lock);
953         /* reserve a part of the read-ahead window that we'll be issuing */
954         if (ras->ras_window_len) {
955                 start = ras->ras_next_readahead;
956                 end = ras->ras_window_start + ras->ras_window_len - 1;
957                 end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
958                 ras->ras_next_readahead = max(end, end + 1);
959
960                 RAS_CDEBUG(ras);
961         }
962         spin_unlock(&ras->ras_lock);
963
964         if (end == 0) {
965                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
966                 RETURN(0);
967         }
968
969         reserved = ll_ra_count_get(ll_i2sbi(mapping->host), end - start + 1);
970         if (reserved < end - start + 1)
971                 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
972
973         gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
974 #ifdef __GFP_NOWARN
975         gfp_mask |= __GFP_NOWARN;
976 #endif
977
978         for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
979                 /* skip locked pages from previous readpage calls */
980                 page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
981                 if (page == NULL) {
982                         CDEBUG(D_READA, "g_c_p_n failed\n");
983                         continue;
984                 }
985
986                 /* we do this first so that we can see the page in the /proc
987                  * accounting */
988                 llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
989                 if (IS_ERR(llap) || llap->llap_defer_uptodate)
990                         goto next_page;
991
992                 /* skip completed pages */
993                 if (Page_Uptodate(page))
994                         goto next_page;
995
996                 /* bail when we hit the end of the lock. */
997                 if ((rc = ll_page_matches(page, 1)) <= 0) {
998                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
999                                        "lock match failed: rc %d\n", rc);
1000                         ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
1001                         match_failed = 1;
1002                         goto next_page;
1003                 }
1004
1005                 rc = ll_issue_page_read(exp, llap, oig, 1);
1006                 if (rc == 0) {
1007                         reserved--;
1008                         ret++;
1009                         LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
1010                                        "started read-ahead\n");
1011                 }
1012                 if (rc) {
1013         next_page:
1014                         LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
1015                                        "skipping read-ahead\n");
1016
1017                         unlock_page(page);
1018                 }
1019                 page_cache_release(page);
1020         }
1021
1022         LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
1023         if (reserved != 0)
1024                 ll_ra_count_put(ll_i2sbi(mapping->host), reserved);
1025         if (i == end + 1 && end == (kms >> PAGE_CACHE_SHIFT))
1026                 ll_ra_stats_inc(mapping, RA_STAT_EOF);
1027
1028         /* if we didn't get to the end of the region we reserved from
1029          * the ras we need to go back and update the ras so that the
1030          * next read-ahead tries from where we left off.  we only do so
1031          * if the region we failed to issue read-ahead on is still ahead
1032          * of the app and behind the next index to start read-ahead from */
1033         if (i != end + 1) {
1034                 spin_lock(&ras->ras_lock);
1035                 if (i < ras->ras_next_readahead &&
1036                     index_in_window(i, ras->ras_window_start, 0,
1037                                     ras->ras_window_len)) {
1038                         ras->ras_next_readahead = i;
1039                         RAS_CDEBUG(ras);
1040                 }
1041                 spin_unlock(&ras->ras_lock);
1042         }
1043
1044         RETURN(ret);
1045 }
1046
1047 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
1048 {
1049         ras->ras_window_start = index & (~(PTLRPC_MAX_BRW_PAGES - 1));
1050 }
1051
1052 /* called with the ras_lock held or from places where it doesn't matter */
1053 static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
1054 {
1055         ras->ras_last_readpage = index;
1056         ras->ras_consecutive = 1;
1057         ras->ras_window_len = 0;
1058         ras_set_start(ras, index);
1059         ras->ras_next_readahead = ras->ras_window_start;
1060
1061         RAS_CDEBUG(ras);
1062 }
1063
1064 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
1065 {
1066         spin_lock_init(&ras->ras_lock);
1067         ras_reset(ras, 0);
1068 }
1069
1070 static void ras_update(struct ll_sb_info *sbi, struct ll_readahead_state *ras,
1071                        unsigned long index, unsigned hit)
1072 {
1073         struct ll_ra_info *ra = &sbi->ll_ra_info;
1074         int zero = 0;
1075         ENTRY;
1076
1077         spin_lock(&sbi->ll_lock);
1078         spin_lock(&ras->ras_lock);
1079
1080         ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);
1081
1082         /* reset the read-ahead window in two cases.  First, when the app
1083          * seeks or reads to some other part of the file.  Second, if we get
1084          * a read-ahead miss on a page we think we've previously issued.
1085          * This can be a symptom of there being so many read-ahead pages
1086          * that the VM is reclaiming them before we get to them. */
1087         if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
1088                 zero = 1;
1089                 ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
1090         } else if (!hit && ras->ras_window_len &&
1091                    index < ras->ras_next_readahead &&
1092                    index_in_window(index, ras->ras_window_start, 0,
1093                                    ras->ras_window_len)) {
1094                 zero = 1;
1095                 ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
1096         }
1097
1098         if (zero) {
1099                 ras_reset(ras, index);
1100                 GOTO(out_unlock, 0);
1101         }
1102
1103         ras->ras_last_readpage = index;
1104         ras->ras_consecutive++;
1105         ras_set_start(ras, index);
1106         ras->ras_next_readahead = max(ras->ras_window_start,
1107                                       ras->ras_next_readahead);
1108
1109         /* wait for a few pages to arrive before issuing readahead to avoid
1110          * the worst overutilization */
1111         if (ras->ras_consecutive == 3) {
1112                 ras->ras_window_len = PTLRPC_MAX_BRW_PAGES;
1113                 GOTO(out_unlock, 0);
1114         }
1115
1116         /* we need to increase the window sometimes.  we'll arbitrarily
1117          * do it half-way through the pages in an rpc */
1118         if ((index & (PTLRPC_MAX_BRW_PAGES - 1)) ==
1119             (PTLRPC_MAX_BRW_PAGES >> 1)) {
1120                 ras->ras_window_len += PTLRPC_MAX_BRW_PAGES;
1121                 ras->ras_window_len = min(ras->ras_window_len,
1122                                           ra->ra_max_pages);
1123         }
1124
1125         EXIT;
1126 out_unlock:
1127         RAS_CDEBUG(ras);
1128         spin_unlock(&ras->ras_lock);
1129         spin_unlock(&sbi->ll_lock);
1130         return;
1131 }
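
/* A rough walk-through of the window arithmetic above for a strictly
 * sequential reader, assuming (for illustration only) that
 * PTLRPC_MAX_BRW_PAGES is 256 and ra_max_pages is generous: after the first
 * couple of reads ras_consecutive reaches 3 and the window opens to 256
 * pages; from then on, every time the read index is half-way through an
 * RPC-sized chunk (index 128, 384, ...) the window grows by another 256,
 * capped at ra_max_pages as in the illustrative helper below.  A seek
 * outside the "8 before / 8 after" band, or a miss inside the current
 * window, resets everything via ras_reset(). */
static unsigned long example_grown_window(unsigned long window_len,
                                          unsigned long rpc_pages,
                                          unsigned long ra_max_pages)
{
        unsigned long len = window_len + rpc_pages;

        return len < ra_max_pages ? len : ra_max_pages; /* min(), as above */
}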
1132
1133 int ll_writepage(struct page *page)
1134 {
1135         struct inode *inode = page->mapping->host;
1136         struct ll_inode_info *lli = ll_i2info(inode);
1137         struct obd_export *exp;
1138         struct ll_async_page *llap;
1139         int rc = 0;
1140         ENTRY;
1141
1142         LASSERT(!PageDirty(page));
1143         LASSERT(PageLocked(page));
1144
1145         exp = ll_i2obdexp(inode);
1146         if (exp == NULL)
1147                 GOTO(out, rc = -EINVAL);
1148
1149         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
1150         if (IS_ERR(llap))
1151                 GOTO(out, rc = PTR_ERR(llap));
1152
1153         page_cache_get(page);
1154         if (llap->llap_write_queued) {
1155                 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
1156                 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
1157                                          llap->llap_cookie,
1158                                          ASYNC_READY | ASYNC_URGENT);
1159         } else {
1160                 rc = queue_or_sync_write(exp, inode, llap,
1161                                          PAGE_SIZE, ASYNC_READY | ASYNC_URGENT);
1162         }
1163         if (rc)
1164                 page_cache_release(page);
1165 out:
1166         if (rc) {
1167                 if (!lli->lli_async_rc)
1168                         lli->lli_async_rc = rc;
1169                 /* re-dirty page on error so it retries write */
1170                 ll_redirty_page(page);
1171                 unlock_page(page);
1172         }
1173         RETURN(rc);
1174 }
1175
1176 /*
1177  * For now we do our readpage the same on both 2.4 and 2.6.  The kernel's
1178  * read-ahead assumes it is valid to issue readpage all the way up to
1179  * i_size, but our dlm locks make that not the case.  We disable the
1180  * kernel's read-ahead and do our own by walking ahead in the page cache
1181  * checking for dlm lock coverage.  The main difference between 2.4 and
1182  * 2.6 is how read-ahead gets batched and issued, but we're using our own,
1183  * so they look the same.
1184  */
1185 int ll_readpage(struct file *filp, struct page *page)
1186 {
1187         struct ll_file_data *fd = filp->private_data;
1188         struct inode *inode = page->mapping->host;
1189         struct obd_export *exp;
1190         struct ll_async_page *llap;
1191         struct obd_io_group *oig = NULL;
1192         int rc;
1193         ENTRY;
1194
1195         LASSERT(PageLocked(page));
1196         LASSERT(!PageUptodate(page));
1197         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
1198                inode->i_ino, inode->i_generation, inode,
1199                (((obd_off)page->index) << PAGE_SHIFT));
1200         LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
1201
1202         rc = oig_init(&oig);
1203         if (rc < 0)
1204                 GOTO(out, rc);
1205
1206         exp = ll_i2obdexp(inode);
1207         if (exp == NULL)
1208                 GOTO(out, rc = -EINVAL);
1209
1210         llap = llap_from_page(page, LLAP_ORIGIN_READPAGE);
1211         if (IS_ERR(llap))
1212                 GOTO(out, rc = PTR_ERR(llap));
1213
1214         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1215                 ras_update(ll_i2sbi(inode), &fd->fd_ras, page->index,
1216                            llap->llap_defer_uptodate);
1217
1218         if (llap->llap_defer_uptodate) {
1219                 llap->llap_ra_used = 1;
1220                 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1221                                   fd->fd_flags);
1222                 if (rc > 0)
1223                         obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
1224                                              NULL, oig);
1225                 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
1226                 SetPageUptodate(page);
1227                 unlock_page(page);
1228                 GOTO(out_oig, rc = 0);
1229         }
1230
1231         rc = ll_page_matches(page, 0);
1232         if (rc < 0) {
1233                 LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
1234                 GOTO(out, rc);
1235         }
1236
1237         if (rc == 0) {
1238                 CWARN("ino %lu page %lu (%llu) not covered by "
1239                       "a lock (mmap?).  check debug logs.\n",
1240                       inode->i_ino, page->index,
1241                       (long long)page->index << PAGE_CACHE_SHIFT);
1242         }
1243
1244         rc = ll_issue_page_read(exp, llap, oig, 0);
1245         if (rc)
1246                 GOTO(out, rc);
1247
1248         LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
1249         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1250                 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1251                              fd->fd_flags);
1252
1253         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1254
1255 out:
1256         if (rc)
1257                 unlock_page(page);
1258 out_oig:
1259         if (oig != NULL)
1260                 oig_release(oig);
1261         RETURN(rc);
1262 }