lustre/llite/rw.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>

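/* Older kernels lack a reverse-order safe list iterator, so provide one
 * locally.  Like list_for_each_safe(), but walks backwards from the tail,
 * caching the predecessor so the current entry may be deleted mid-walk. */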
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
                pos = n, n = pos->prev )
#endif

cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;

/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pg;
        int rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;

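        /* A write covering the page that contains EOF only transfers the
         * bytes up to EOF.  For example, with 4096-byte pages and
         * i_size = 10000, the page at index 2 carries 10000 % 4096 = 1808
         * valid bytes, so that is all we send. */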
        if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
                pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
        else
                pg.count = CFS_PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, i_size_read(inode),
                       page->mapping->host, i_size_read(page->mapping->host),
                       page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
                                   pg.count);
        else
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
                                   pg.count);
        oinfo.oi_oa = oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(cmd, ll_i2obdexp(inode), &oinfo, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}

int ll_file_punch(struct inode *inode, loff_t new_size, int srvlock)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_info oinfo = { { { 0 } } };
        struct obdo oa;
        int rc;

        ENTRY;
        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lli->lli_smd->lsm_object_id, new_size, new_size);

        oinfo.oi_md = lli->lli_smd;
        oinfo.oi_policy.l_extent.start = new_size;
        oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
        oinfo.oi_oa = &oa;
        oa.o_id = lli->lli_smd->lsm_object_id;
        oa.o_valid = OBD_MD_FLID;
        oa.o_flags = srvlock ? OBD_FL_TRUNCLOCK : 0;
        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLFID | OBD_MD_FLATIME | OBD_MD_FLMTIME |
                        OBD_MD_FLCTIME | OBD_MD_FLUID | OBD_MD_FLGID |
                        OBD_MD_FLGENER | OBD_MD_FLBLOCKS);
        rc = obd_punch_rqset(ll_i2obdexp(inode), &oinfo, NULL);
        if (rc) {
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
                RETURN(rc);
        }
        obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                      OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        RETURN(0);
}
/* This isn't where truncate starts.  Roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate.  setattr_raw grabs
 * the DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM
 * to avoid races.
 *
 * Must be called under ->lli_size_sem. */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        int srvlock = test_bit(LLI_F_SRVLOCK, &lli->lli_flags);
        loff_t new_size;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode, i_size_read(inode),
               i_size_read(inode));

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lli->lli_smd) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        if (!srvlock) {
                struct ost_lvb lvb;
                int rc;

                /* XXX I'm pretty sure this is a hack to paper over a more
                 * fundamental race condition. */
                lov_stripe_lock(lli->lli_smd);
                inode_init_lvb(inode, &lvb);
                rc = obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 0);
                inode->i_blocks = lvb.lvb_blocks;
                if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
                        CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                               lli->lli_smd->lsm_object_id, i_size_read(inode),
                               i_size_read(inode));
                        lov_stripe_unlock(lli->lli_smd);
                        GOTO(out_unlock, 0);
                }

                obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd,
                               i_size_read(inode), 1);
                lov_stripe_unlock(lli->lli_smd);
        }

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
                     (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves a partial page, update its checksum */
                struct page *page = find_get_page(inode->i_mapping,
                                                  i_size_read(inode) >>
                                                  CFS_PAGE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                char *kaddr = kmap_atomic(page, KM_USER0);
                                llap->llap_checksum =
                                        crc32_le(0, kaddr, CFS_PAGE_SIZE);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        page_cache_release(page);
                }
        }

        new_size = i_size_read(inode);
        ll_inode_size_unlock(inode, 0);
        if (!srvlock)
                ll_file_punch(inode, new_size, 0);

        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
} /* ll_truncate */

int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pga;
        struct obdo oa;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = CFS_PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        oinfo.oi_oa = &oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oinfo, 1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ mark it
         * up-to-date until commit_write */
        if (from == 0 && to == CFS_PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, there is no need to read the old
         * data.  The extent locking will have updated the KMS, and for our
         * purposes here we can treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kaddr, 0, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
        if (rc == 0) {
                /* bug 1598: don't clobber blksize */
                oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
                obdo_refresh_inode(inode, &oa, oa.o_valid);
        }

        EXIT;
 prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        return rc;
}

static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LASSERT(!PageWriteback(page));

        /* If we left PageDirty set we might get another writepage call in
         * the future.  List walkers are bright enough to check the page
         * dirty bit, so we can leave the page on whatever list it's on.
         * XXX also, we're called with the cli list lock held, so if we took
         * the page-cache list lock we'd create a lock inversion with the
         * removepage path, which takes the page lock and then the cli
         * lock. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        clear_page_dirty(page);
#else
        LASSERTF(!PageWriteback(page), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);
        clear_page_dirty_for_io(page);

        /* This actually clears the dirty bit in the radix tree. */
        set_page_writeback(page);
#endif

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        RETURN(0);
}

/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.  (No further races are possible,
 *    since by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes.
 */
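/* Worked example: with 4096-byte pages, if the merged kms is 10000 bytes,
 * a queued write to the page at index 2 (bytes 8192..12287) is trimmed to
 * 10000 % 4096 = 1808 bytes, while a page at index 3 or beyond (wholly past
 * kms, e.g. after racing with truncate) is trimmed to 0 and skipped. */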
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
                return kms % CFS_PAGE_SIZE;

        return CFS_PAGE_SIZE;
}

void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_valid = OBD_MD_FLID;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_io_epoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                        OBD_MD_FLUID | OBD_MD_FLGID |
                        OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}

static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}

static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
                              obd_valid valid)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        obdo_from_inode(oa, llap->llap_page->mapping->host, valid);

        EXIT;
}

static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =        ll_ap_make_ready,
        .ap_refresh_count =     ll_ap_refresh_count,
        .ap_fill_obdo =         ll_ap_fill_obdo,
        .ap_update_obdo =       ll_ap_update_obdo,
        .ap_completion =        ll_ap_completion,
};

struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}

/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached to every page in lustre, linked off @sbi.
 * We add a dummy llap to the list so we don't lose our place while walking.
 * If llaps in the list are being moved, they will only move to the end of
 * the LRU, and we aren't terribly interested in those pages here (we start
 * at the beginning of the list, where the least-used llaps are).
 */
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

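        /* 40 << (20 - CFS_PAGE_SHIFT) is 40MB expressed in pages: with
         * 4096-byte pages, CFS_PAGE_SHIFT is 12, giving a cap of
         * 40 << 8 = 10240 pages per call. */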
        if (want > 40 << (20 - CFS_PAGE_SHIFT))
                want = 40 << (20 - CFS_PAGE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        while (total-- > 0 && count < want) {
                struct page *page;
                int keep;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,
                                              &dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                keep = (llap->llap_write_queued || PageDirty(page) ||
                        PageWriteback(page) || (!PageUptodate(page) &&
                        llap->llap_origin != LLAP_ORIGIN_READAHEAD));

                LL_CDEBUG_PAGE(D_PAGE, page, "%s LRU page: %s%s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               PageWriteback(page) ? "wb" : "",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                         (__u64)page->index << CFS_PAGE_SHIFT,
                                         ((__u64)page->index << CFS_PAGE_SHIFT)|
                                          ~CFS_PAGE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                ++count;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
                                               " because it is %s\n",
                                               PageDirty(page) ?
                                               "dirty" : "mapped");
                        }
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}

static struct ll_async_page *llap_from_page_with_lockh(struct page *page,
                                                       unsigned origin,
                                                       struct lustre_handle *lockh)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (!inode) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "page received\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }
        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2obdexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
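        /* The OBD cookie lives in the same slab object, immediately after
         * the size_round()ed llap; ll_async_page_slab_size is expected to
         * have been sized (elsewhere) to cover both. */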
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        /* XXX: for bug 11270 - check for lockless origin here! */
        if (origin == LLAP_ORIGIN_LOCKLESS_IO)
                llap->llap_nocache = 1;

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << CFS_PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie,
                                 llap->llap_nocache, lockh);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

 out:
        if (unlikely(sbi->ll_flags & LL_SBI_LLITE_CHECKSUM)) {
                __u32 csum = 0;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE ||
                    origin == LLAP_ORIGIN_LOCKLESS_IO) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

        llap->llap_origin = origin;
        RETURN(llap);
}

static inline struct ll_async_page *llap_from_page(struct page *page,
                                                   unsigned origin)
{
        return llap_from_page_with_lockh(page, origin, NULL);
}

static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }
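        /* Example with 4096-byte pages and i_size = 10000: a partial sync
         * write of to = 1000 on page index 1 (below size_index 2) is widened
         * to a full 4096-byte request, while the same write on page index 2
         * (the EOF page) is widened to size_to = 10000 % 4096 = 1808 bytes. */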

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum = 0;
                struct page *page = llap->llap_page;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page))
                        end_page_writeback(llap->llap_page);
        }

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

free_oig:
        oig_release(oig);
out:
        RETURN(rc);
}

/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        struct lustre_handle *lockh = NULL;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
                lockh = &fd->fd_cwlockh;

        llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_COMMIT_WRITE, lockh);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* Put the page in the page cache; from now on ll_removepage is
         * responsible for cleaning up the llap.  Only set the page dirty
         * when it has been queued to be written out. */
        if (llap->llap_write_queued)
                set_page_dirty(page);

out:
        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* This page is beyond the pale of i_size, so it can't be
                 * truncated by ll_p_r_e during lock revocation.  We must
                 * tear down our book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}

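/* Reserve up to @len pages against the filesystem-wide read-ahead budget
 * (ra_max_pages); returns how many pages were actually granted. */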
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

/* called for each page in a completed rpc. */
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        int ret = 0;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page, cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ)
                        llap->llap_defer_uptodate = 0;
                SetPageError(page);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
                if (rc == -ENOSPC)
                        set_bit(AS_ENOSPC, &page->mapping->flags);
                else
                        set_bit(AS_EIO, &page->mapping->flags);
#else
                page->mapping->gfp_mask |= AS_EIO_MASK;
#endif
        }

        unlock_page(page);

        if (cmd & OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        if (PageWriteback(page))
                end_page_writeback(page);
        page_cache_release(page);

        RETURN(ret);
}

static void __ll_put_llap(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        exp = ll_i2obdexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback... */
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);

        EXIT;
}

/* The kernel calls us here when a page is unhashed from the page cache.
 * The page will be locked and the kernel is holding a spinlock, so we
 * need to be careful.  We're just tearing down our book-keeping here. */
void ll_removepage(struct page *page)
{
        struct ll_async_page *llap = llap_cast_private(page);
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LASSERT(!llap->llap_lockless_io_page);
        LASSERT(!llap->llap_nocache);

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
        __ll_put_llap(page);

        EXIT;
}

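/* Queue a single page for read under the group io @oig.  With @defer set
 * (the read-ahead case), completion leaves the page !PageUptodate
 * (llap_defer_uptodate) so that read-ahead pages discarded before use can
 * be accounted in ll_ra_accounting(). */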
static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
                                ASYNC_READY | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
               ras->ras_consecutive_pages, ras->ras_window_start,            \
               ras->ras_window_len, ras->ras_next_readahead,                 \
               ras->ras_requests, ras->ras_request_index);

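/* Test whether @index falls within [point - before, point + after],
 * clamping the bounds against unsigned wrap-around: e.g. point = 100,
 * before = 8, after = 8 accepts indexes 92..108, while point = 3,
 * before = 8 clamps the window start to 0. */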
static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        if (start > point)
               start = 0;
        if (end < point)
               end = ~0;

        return start <= index && index <= end;
}

static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data       *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}

void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        ras->ras_requests++;
        ras->ras_request_index = 0;
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;

        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}

static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}

struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read         *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);
        return bead;
}

static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        unsigned int gfp_mask;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* Enlarge the RA window to encompass the full read */
        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
            bead->lrr_start + bead->lrr_count) {
                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
                                      ras->ras_window_start;
        }
        /* Reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (end != 0) {
                /* Truncate RA window to end of file */
                end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
                if (page == NULL) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* Check if page was truncated or reclaimed */
                if (page->mapping != mapping) {
                        ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                        goto next_page;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate) {
                        if (PTR_ERR(llap) == -ENOLCK) {
                                ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                                match_failed = 1;
                                CDEBUG(D_READA | D_PAGE,
                                       "Adding page to cache failed index "
                                       "%lu\n", i);
                        }
                        goto next_page;
                }

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "started read-ahead\n");
                } else {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
        if (i == end + 1 && end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}

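/* Align the window start down to a 1MB boundary.  With 4096-byte pages,
 * 1024 * 1024 >> CFS_PAGE_SHIFT is 256 pages, so the index is masked with
 * ~255: e.g. index 300 yields a window start of 256. */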
static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~((1024 * 1024 >> CFS_PAGE_SHIFT) - 1));
}

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive_requests = 0;
        ras->ras_consecutive_pages = 0;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start, index);

        RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        ras->ras_requests = 0;
        INIT_LIST_HEAD(&ras->ras_read_beads);
}

static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                       struct ll_readahead_state *ras, unsigned long index,
                       unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM
         * is reclaiming them before we get to them. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        /* On the second access to a file smaller than the tunable
         * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
         * file, up to ra_max_pages.  This is simply a best effort and
         * only occurs once per open file.  Normal RA behavior resumes
         * for subsequent IO.  The mmap case does not increment
         * ras_requests and thus can never trigger this behavior. */
1327         if (ras->ras_requests == 2 && !ras->ras_request_index) {
1328                 __u64 kms_pages;
1329
1330                 kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
1331                             CFS_PAGE_SHIFT;
1332
1333                 CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
1334                        ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);
1335
1336                 if (kms_pages &&
1337                     kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1338                         ras->ras_window_start = 0;
1339                         ras->ras_last_readpage = 0;
1340                         ras->ras_next_readahead = 0;
1341                         ras->ras_window_len = min(ra->ra_max_pages,
1342                                 ra->ra_max_read_ahead_whole_pages);
1343                         GOTO(out_unlock, 0);
1344                 }
1345         }
1346
1347         if (zero) {
1348                 ras_reset(ras, index);
1349                 GOTO(out_unlock, 0);
1350         }
1351
1352         ras->ras_last_readpage = index;
1353         ras->ras_consecutive_pages++;
1354         ras_set_start(ras, index);
1355         ras->ras_next_readahead = max(ras->ras_window_start,
1356                                       ras->ras_next_readahead);
1357
1358         /* Trigger RA in the mmap case where ras_consecutive_requests
1359          * is not incremented and thus can't be used to trigger RA */
1360         if (!ras->ras_window_len && ras->ras_consecutive_pages == 3) {
1361                 ras->ras_window_len = 1024 * 1024 >> CFS_PAGE_SHIFT;
1362                 GOTO(out_unlock, 0);
1363         }
1364
1365         /* The initial ras_window_len is set to the request size.  To avoid
1366          * uselessly reading and discarding pages for random IO the window is
1367          * only increased once per consecutive request received. */
1368         if (ras->ras_consecutive_requests > 1 && !ras->ras_request_index) {
1369                 ras->ras_window_len = min(ras->ras_window_len +
1370                                           (1024 * 1024 >> CFS_PAGE_SHIFT),
1371                                           ra->ra_max_pages);
1372         }
1373
1374         EXIT;
1375 out_unlock:
1376         RAS_CDEBUG(ras);
1377         ras->ras_request_index++;
1378         spin_unlock(&ras->ras_lock);
1379         spin_unlock(&sbi->ll_lock);
1380         return;
1381 }
1382
1383 int ll_writepage(struct page *page)
1384 {
1385         struct inode *inode = page->mapping->host;
1386         struct ll_inode_info *lli = ll_i2info(inode);
1387         struct obd_export *exp;
1388         struct ll_async_page *llap;
1389         int rc = 0;
1390         ENTRY;
1391
1392         LASSERT(!PageDirty(page));
1393         LASSERT(PageLocked(page));
1394
1395         exp = ll_i2obdexp(inode);
1396         if (exp == NULL)
1397                 GOTO(out, rc = -EINVAL);
1398
1399         llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
1400         if (IS_ERR(llap))
1401                 GOTO(out, rc = PTR_ERR(llap));
1402
1403         LASSERT(!llap->llap_nocache);
1404         LASSERT(!PageWriteback(page));
1405         set_page_writeback(page);
1406
1407         page_cache_get(page);
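        /* if the llap is already on the async write queue, just flag it
         * ready and urgent so it goes out promptly; otherwise hand it to
         * queue_or_sync_write(), which queues it or, failing that, writes
         * it out synchronously */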
1408         if (llap->llap_write_queued) {
1409                 LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
1410                 rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
1411                                          llap->llap_cookie,
1412                                          ASYNC_READY | ASYNC_URGENT);
1413         } else {
1414                 rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
1415                                          ASYNC_READY | ASYNC_URGENT);
1416         }
1417         if (rc)
1418                 page_cache_release(page);
1419 out:
1420         if (rc) {
1421                 if (!lli->lli_async_rc)
1422                         lli->lli_async_rc = rc;
1423                 /* end writeback and re-dirty the page so the write is retried */
1424                 if (PageWriteback(page)) {
1425                         end_page_writeback(page);
1426                 }
1427                 /* only resend the page if its IO has not started */
1428                 if (!PageError(page))
1429                         ll_redirty_page(page);
1430                 unlock_page(page);
1431         }
1432         RETURN(rc);
1433 }
1434
1435 /*
1436  * For now we do our readpage the same on both 2.4 and 2.6.  The kernel's
1437  * read-ahead assumes it is valid to issue readpage all the way up to
1438  * i_size, but our dlm locks make that not the case.  We disable the
1439  * kernel's read-ahead and do our own by walking ahead in the page cache,
1440  * checking for dlm lock coverage.  The main difference between 2.4 and
1441  * 2.6 is how read-ahead gets batched and issued, but since we use our
1442  * own, the two cases look the same.
1443  */
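
/*
 * A minimal sketch of the walk described above, for illustration only.  It
 * is not built (hence the #if 0), and the helper
 * example_page_is_lock_covered() is hypothetical, standing in for the real
 * dlm coverage checks done through the llap machinery below.
 */
#if 0
static unsigned long example_readahead_walk(struct address_space *mapping,
                                            unsigned long start,
                                            unsigned long max_pages)
{
        unsigned long index;

        /* walk forward from 'start' and stop at the first page whose
         * extent is not covered by a dlm lock we already hold */
        for (index = start; index < start + max_pages; index++)
                if (!example_page_is_lock_covered(mapping->host, index))
                        break;

        /* number of pages it is safe to read ahead */
        return index - start;
}
#endif
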
1444 int ll_readpage(struct file *filp, struct page *page)
1445 {
1446         struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
1447         struct inode *inode = page->mapping->host;
1448         struct obd_export *exp;
1449         struct ll_async_page *llap;
1450         struct obd_io_group *oig = NULL;
1451         struct lustre_handle *lockh = NULL;
1452         int rc;
1453         ENTRY;
1454
1455         LASSERT(PageLocked(page));
1456         LASSERT(!PageUptodate(page));
1457         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
1458                inode->i_ino, inode->i_generation, inode,
1459                (((loff_t)page->index) << CFS_PAGE_SHIFT),
1460                (((loff_t)page->index) << CFS_PAGE_SHIFT));
1461         LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
1462
1463         if (!ll_i2info(inode)->lli_smd) {
1464                 /* File with no objects - one big hole */
1465                 /* used only because remove_from_page_cache() is not
1466                  * exported; afterwards the page is made uptodate again */
1467                 ll_truncate_complete_page(page);
1468                 clear_page(kmap(page));
1469                 kunmap(page);
1470                 SetPageUptodate(page);
1471                 unlock_page(page);
1472                 RETURN(0);
1473         }
1474
1475         rc = oig_init(&oig);
1476         if (rc < 0)
1477                 GOTO(out, rc);
1478
1479         exp = ll_i2obdexp(inode);
1480         if (exp == NULL)
1481                 GOTO(out, rc = -EINVAL);
1482
1483         if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
1484                 lockh = &fd->fd_cwlockh;
1485
1486         llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_READPAGE, lockh);
1487         if (IS_ERR(llap)) {
1488                 if (PTR_ERR(llap) == -ENOLCK) {
1489                         CWARN("ino %lu page %lu (%llu) not covered by "
1490                               "a lock (mmap?).  check debug logs.\n",
1491                               inode->i_ino, page->index,
1492                               (long long)page->index << PAGE_CACHE_SHIFT);
1493                 }
1494                 GOTO(out, rc = PTR_ERR(llap));
1495         }
1496
1497         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1498                 ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
1499                            llap->llap_defer_uptodate);
1500
1502         if (llap->llap_defer_uptodate) {
1503                 /* This is the callpath if we got the page from a readahead */
1504                 llap->llap_ra_used = 1;
1505                 rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1506                                   fd->fd_flags);
1507                 if (rc > 0)
1508                         obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
1509                                              NULL, oig);
1510                 LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
1511                 SetPageUptodate(page);
1512                 unlock_page(page);
1513                 GOTO(out_oig, rc = 0);
1514         }
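        /* note on the branch above: the page's data was already filled by
         * the earlier read-ahead IO, so it is only marked uptodate here; a
         * positive return from ll_readahead() means more pages were queued,
         * and the group IO is triggered to send them in one batch */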
1515
1516         rc = ll_issue_page_read(exp, llap, oig, 0);
1517         if (rc)
1518                 GOTO(out, rc);
1519
1520         LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
1521         /* We have just requested the actual page we want, see if we can tack
1522          * on some readahead to that page's RPC before it is sent. */
1523         if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
1524                 ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
1525                              fd->fd_flags);
1526
1527         rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
1528
1529 out:
1530         if (rc)
1531                 unlock_page(page);
1532 out_oig:
1533         if (oig != NULL)
1534                 oig_release(oig);
1535         RETURN(rc);
1536 }
1537
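/*
 * The helpers below implement the lockless (server-locked) IO path:
 * ll_file_prepare_pages() allocates temporary pages flagged for lockless
 * IO, ll_file_copy_pages() moves data between those pages and the user
 * buffer, and ll_file_oig_pages() sends them as a single synchronous IO
 * group with OBD_BRW_SRVLOCK so the extent lock is taken on the server
 * rather than on the client.
 */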
1538 static void ll_file_put_pages(struct page **pages, int numpages)
1539 {
1540         int i;
1541         struct page **pp;
1542         ENTRY;
1543
1544         for (i = 0, pp = pages; i < numpages; i++, pp++) {
1545                 if (*pp) {
1546                         LL_CDEBUG_PAGE(D_PAGE, (*pp), "free\n");
1547                         __ll_put_llap(*pp);
1548                         if (page_private(*pp))
1549                                 CERROR("the llap wasn't freed\n");
1550                         (*pp)->mapping = NULL;
1551                         if (page_count(*pp) != 1)
1552                                 CERROR("page %p, flags %#lx, count %i, private %p\n",
1553                                        (*pp), (unsigned long)(*pp)->flags,
1554                                        page_count(*pp), (void *)page_private(*pp));
1555                         __free_pages(*pp, 0);
1556                 }
1557         }
1558         OBD_FREE(pages, numpages * sizeof(struct page *));
1559         EXIT;
1560 }
1561
1562 static struct page **ll_file_prepare_pages(int numpages, struct inode *inode,
1563                                            unsigned long first)
1564 {
1565         struct page **pages;
1566         int i;
1567         int rc = 0;
1568         ENTRY;
1569
1570         OBD_ALLOC(pages, sizeof(struct page *) * numpages);
1571         if (pages == NULL)
1572                 RETURN(ERR_PTR(-ENOMEM));
1573         for (i = 0; i < numpages; i++) {
1574                 struct page *page;
1575                 struct ll_async_page *llap;
1576
1577                 page = alloc_pages(GFP_HIGHUSER, 0);
1578                 if (page == NULL)
1579                         GOTO(err, rc = -ENOMEM);
1580                 pages[i] = page;
1581                 /* llap_from_page needs page index and mapping to be set */
1582                 page->index = first++;
1583                 page->mapping = inode->i_mapping;
1584                 llap = llap_from_page(page, LLAP_ORIGIN_LOCKLESS_IO);
1585                 if (IS_ERR(llap))
1586                         GOTO(err, rc = PTR_ERR(llap));
1587                 llap->llap_lockless_io_page = 1;
1588         }
1589         RETURN(pages);
1590 err:
1591         ll_file_put_pages(pages, numpages);
1592         RETURN(ERR_PTR(rc));
1593 }
1594
1595 static ssize_t ll_file_copy_pages(struct page **pages, int numpages,
1596                                   char *buf, loff_t pos, size_t count,
1597                                   int rw)
1598 {
1599         ssize_t amount = 0;
1600         int i;
1601         int updatechecksum = ll_i2sbi(pages[0]->mapping->host)->ll_flags &
1602                              LL_SBI_LLITE_CHECKSUM;
1603         ENTRY;
1604
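        /* per-page arithmetic example, assuming 4 KB pages: pos = 6144
         * gives offset = 6144 & 4095 = 2048, so at most 4096 - 2048 = 2048
         * bytes are moved for this page and the remainder continues on the
         * next page at offset 0 */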
1605         for (i = 0; i < numpages; i++) {
1606                 unsigned offset, bytes, left;
1607                 char *vaddr;
1608
1609                 vaddr = kmap(pages[i]);
1610                 offset = pos & (CFS_PAGE_SIZE - 1);
1611                 bytes = min_t(unsigned, CFS_PAGE_SIZE - offset, count);
1612                 LL_CDEBUG_PAGE(D_PAGE, pages[i], "op = %s, addr = %p, "
1613                                "buf = %p, bytes = %u\n",
1614                                (rw == WRITE) ? "CFU" : "CTU",
1615                                vaddr + offset, buf, bytes);
1616                 if (rw == WRITE) {
1617                         left = copy_from_user(vaddr + offset, buf, bytes);
1618                         if (updatechecksum) {
1619                                 struct ll_async_page *llap;
1620
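                                /* the checksum below covers the whole
                                 * page, not just the bytes copied;
                                 * presumably to match whole-page
                                 * verification at IO time */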
1621                                 llap = llap_cast_private(pages[i]);
1622                                 llap->llap_checksum = crc32_le(0, vaddr,
1623                                                                CFS_PAGE_SIZE);
1624                         }
1625                 } else {
1626                         left = copy_to_user(buf, vaddr + offset, bytes);
1627                 }
1628                 kunmap(pages[i]);
1629                 amount += bytes;
1630                 if (left) {
1631                         amount -= left;
1632                         break;
1633                 }
1634                 buf += bytes;
1635                 count -= bytes;
1636                 pos += bytes;
1637         }
1638         if (amount == 0)
1639                 RETURN(-EFAULT);
1640         RETURN(amount);
1641 }
1642
1643 static int ll_file_oig_pages(struct inode * inode, struct page **pages,
1644                              int numpages, loff_t pos, size_t count, int rw)
1645 {
1646         struct obd_io_group *oig;
1647         struct ll_inode_info *lli = ll_i2info(inode);
1648         struct obd_export *exp;
1649         loff_t org_pos = pos;
1650         obd_flag brw_flags;
1651         int rc;
1652         int i;
1653         ENTRY;
1654
1655         exp = ll_i2obdexp(inode);
1656         if (exp == NULL)
1657                 RETURN(-EINVAL);
1658         rc = oig_init(&oig);
1659         if (rc)
1660                 RETURN(rc);
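        /* OBD_BRW_SRVLOCK makes the server take the extent lock for this
         * IO (the "lockless" part, from the client's point of view);
         * CAP_SYS_RESOURCE holders additionally bypass quota checks */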
1661         brw_flags = OBD_BRW_SRVLOCK;
1662         if (capable(CAP_SYS_RESOURCE))
1663                 brw_flags |= OBD_BRW_NOQUOTA;
1664
1665         for (i = 0; i < numpages; i++) {
1666                 struct ll_async_page *llap;
1667                 unsigned from, bytes;
1668
1669                 from = pos & (CFS_PAGE_SIZE - 1);
1670                 bytes = min_t(unsigned, CFS_PAGE_SIZE - from,
1671                               count - pos + org_pos);
1672                 llap = llap_cast_private(pages[i]);
1673                 LASSERT(llap);
1674
1675                 lock_page(pages[i]);
1676
1677                 LL_CDEBUG_PAGE(D_PAGE, pages[i], "offset "LPU64","
1678                                " from %u, bytes = %u\n",
1679                                pos, from, bytes);
1680                 LASSERTF(pos >> CFS_PAGE_SHIFT == pages[i]->index,
1681                          "wrong page index %lu (%lu)\n",
1682                          pages[i]->index,
1683                          (unsigned long)(pos >> CFS_PAGE_SHIFT));
1684                 rc = obd_queue_group_io(exp, lli->lli_smd, NULL, oig,
1685                                         llap->llap_cookie,
1686                                         (rw == WRITE) ?
1687                                         OBD_BRW_WRITE:OBD_BRW_READ,
1688                                         from, bytes, brw_flags,
1689                                         ASYNC_READY | ASYNC_URGENT |
1690                                         ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
1691                 if (rc) {
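                        /* count this page as locked so the cleanup loop
                         * below unlocks it as well */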
1692                         i++;
1693                         GOTO(out, rc);
1694                 }
1695                 pos += bytes;
1696         }
1697         rc = obd_trigger_group_io(exp, lli->lli_smd, NULL, oig);
1698         if (rc)
1699                 GOTO(out, rc);
1700         rc = oig_wait(oig);
1701 out:
1702         while (--i >= 0)
1703                 unlock_page(pages[i]);
1704         oig_release(oig);
1705         RETURN(rc);
1706 }
1707
1708 ssize_t ll_file_lockless_io(struct file *file, char *buf, size_t count,
1709                             loff_t *ppos, int rw)
1710 {
1711         loff_t pos;
1712         struct inode *inode = file->f_dentry->d_inode;
1713         ssize_t rc = 0;
1714         int max_pages;
1715         size_t amount = 0;
1716         unsigned long first, last;
1717         ENTRY;
1718
1719         if (rw == READ) {
1720                 loff_t isize;
1721
1722                 ll_inode_size_lock(inode, 0);
1723                 isize = i_size_read(inode);
1724                 ll_inode_size_unlock(inode, 0);
1725                 if (*ppos >= isize)
1726                         GOTO(out, rc = 0);
1727                 if (*ppos + count >= isize)
1728                         count -= *ppos + count - isize;
1729                 if (count == 0)
1730                         GOTO(out, rc);
1731         } else {
1732                 rc = generic_write_checks(file, ppos, &count, 0);
1733                 if (rc)
1734                         GOTO(out, rc);
1735                 rc = remove_suid(file->f_dentry);
1736                 if (rc)
1737                         GOTO(out, rc);
1738         }
1739         pos = *ppos;
1740         first = pos >> CFS_PAGE_SHIFT;
1741         last = (pos + count - 1) >> CFS_PAGE_SHIFT;
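        /* e.g. with 4 KB pages, pos = 5000 and count = 10000 cover bytes
         * 5000..14999, so first = 1 and last = 3: three pages of IO */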
1742         max_pages = PTLRPC_MAX_BRW_PAGES *
1743                 ll_i2info(inode)->lli_smd->lsm_stripe_count;
1744         CDEBUG(D_INFO, "max_pages_per_rpc = %u, stripe_count = %u\n",
1745                PTLRPC_MAX_BRW_PAGES,
1746                ll_i2info(inode)->lli_smd->lsm_stripe_count);
1747
1748         while (first <= last && rc >= 0) {
1749                 int pages_for_io;
1750                 struct page **pages;
1751                 size_t bytes = count - amount;
1752
1753                 pages_for_io = min_t(int, last - first + 1, max_pages);
1754                 pages = ll_file_prepare_pages(pages_for_io, inode, first);
1755                 if (IS_ERR(pages)) {
1756                         rc = PTR_ERR(pages);
1757                         break;
1758                 }
1759                 if (rw == WRITE) {
1760                         rc = ll_file_copy_pages(pages, pages_for_io, buf,
1761                                                 pos + amount, bytes, rw);
1762                         if (rc < 0)
1763                                 GOTO(put_pages, rc);
1764                         bytes = rc;
1765                 }
1766                 rc = ll_file_oig_pages(inode, pages, pages_for_io,
1767                                        pos + amount, bytes, rw);
1768                 if (rc)
1769                         GOTO(put_pages, rc);
1770                 if (rw == READ) {
1771                         rc = ll_file_copy_pages(pages, pages_for_io, buf,
1772                                                 pos + amount, bytes, rw);
1773                         if (rc < 0)
1774                                 GOTO(put_pages, rc);
1775                         bytes = rc;
1776                 }
1777                 amount += bytes;
1778                 buf += bytes;
1779 put_pages:
1780                 ll_file_put_pages(pages, pages_for_io);
1781                 first += pages_for_io;
1782                 /* bail out if the transfer came up short of this chunk */
1783                 if (pos + amount < ((loff_t)first << CFS_PAGE_SHIFT))
1784                         break;
1785         }
1786         /* NOTE: in the absence of LDLM locks we do not update i_size or
1787          * KMS, even if a write makes the file larger */
1788         file_accessed(file);
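        /* a read that came back short hit a region with no backing data
         * (a hole, or beyond what the OSTs returned); the rest of the user
         * buffer must still read as zeroes, hence clear_user() below */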
1789         if (rw == READ && amount < count && rc == 0) {
1790                 unsigned long not_cleared;
1791
1792                 not_cleared = clear_user(buf, count - amount);
1793                 amount = count - not_cleared;
1794                 if (not_cleared)
1795                         rc = -EFAULT;
1796         }
1797         if (amount > 0) {
1798                 lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
1799                                     (rw == WRITE) ?
1800                                     LPROC_LL_LOCKLESS_WRITE :
1801                                     LPROC_LL_LOCKLESS_READ,
1802                                     (long)amount);
1803                 *ppos += amount;
1804                 RETURN(amount);
1805         }
1806 out:
1807         RETURN(rc);
1808 }
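
/*
 * Illustrative caller of ll_file_lockless_io(), not built (hence the
 * #if 0); the real call sites are expected to be in the llite read/write
 * paths.  The helper name is hypothetical.
 */
#if 0
static ssize_t example_lockless_read(struct file *file, char *buf,
                                     size_t count, loff_t *ppos)
{
        /* READ selects the direction; *ppos is advanced on success */
        return ll_file_lockless_io(file, buf, count, ppos, READ);
}
#endif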