/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/version.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <asm/uaccess.h>
#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#define DEBUG_SUBSYSTEM S_LLITE

#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
#ifndef list_for_each_prev_safe
#define list_for_each_prev_safe(pos, n, head) \
        for (pos = (head)->prev, n = pos->prev; pos != (head); \
             pos = n, n = pos->prev)
#endif
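/* A minimal usage sketch of the fallback macro above (illustrative only, not
 * from the original file): walk a list tail-to-head while it stays safe to
 * unlink the current entry, because "n" already holds the previous node:
 *
 *      struct list_head *pos, *n;
 *      list_for_each_prev_safe(pos, n, &sbi->ll_pglist)
 *              list_del_init(pos);
 */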
cfs_mem_cache_t *ll_async_page_slab = NULL;
size_t ll_async_page_slab_size = 0;
/* SYNCHRONOUS I/O to object storage for an inode */
static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
                  struct page *page, int flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pg;
        int rc;
        ENTRY;

        pg.pg = page;
        pg.off = ((obd_off)page->index) << CFS_PAGE_SHIFT;

        if ((cmd & OBD_BRW_WRITE) && (pg.off+CFS_PAGE_SIZE>i_size_read(inode)))
                pg.count = i_size_read(inode) % CFS_PAGE_SIZE;
        else
                pg.count = CFS_PAGE_SIZE;

        LL_CDEBUG_PAGE(D_PAGE, page, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
                       cmd & OBD_BRW_WRITE ? "write" : "read", pg.count,
                       inode->i_ino, pg.off, pg.off);
        if (pg.count == 0) {
                CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
                       LPU64"\n", inode->i_ino, inode, i_size_read(inode),
                       page->mapping->host, i_size_read(page->mapping->host),
                       page->index, pg.off);
        }

        pg.flag = flags;

        if (cmd & OBD_BRW_WRITE)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_WRITE,
                                   pg.count);
        else
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_BRW_READ,
                                   pg.count);
        oinfo.oi_oa = oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(cmd, ll_i2obdexp(inode), &oinfo, 1, &pg, NULL);
        if (rc == 0)
                obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
        else if (rc != -EIO)
                CERROR("error from obd_brw: rc = %d\n", rc);
        RETURN(rc);
}
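/* Worked example of the partial-page count above (a sketch assuming 4 KB
 * pages, i.e. CFS_PAGE_SHIFT == 12): with i_size == 10000, a write of the
 * page at index 2 (pg.off == 8192) satisfies pg.off + 4096 > 10000, so only
 * pg.count = 10000 % 4096 = 1808 bytes are sent; pages wholly below i_size
 * send the full 4096.  Note a page-aligned i_size makes the modulo zero,
 * which is exactly the "ZERO COUNT" anomaly reported above. */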
int ll_file_punch(struct inode * inode, loff_t new_size, int srvlock)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_info oinfo = { { { 0 } } };
        struct obdo oa;
        int rc;
        ENTRY;

        CDEBUG(D_INFO, "calling punch for "LPX64" (new size %Lu=%#Lx)\n",
               lli->lli_smd->lsm_object_id, new_size, new_size);

        oinfo.oi_md = lli->lli_smd;
        oinfo.oi_policy.l_extent.start = new_size;
        oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
        oinfo.oi_oa = &oa;
        oa.o_id = lli->lli_smd->lsm_object_id;
        oa.o_valid = OBD_MD_FLID;
        oa.o_flags = srvlock ? OBD_FL_TRUNCLOCK : 0;
        obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
                        OBD_MD_FLFID | OBD_MD_FLATIME | OBD_MD_FLMTIME |
                        OBD_MD_FLCTIME | OBD_MD_FLUID | OBD_MD_FLGID |
                        OBD_MD_FLGENER | OBD_MD_FLBLOCKS);
        rc = obd_punch_rqset(ll_i2obdexp(inode), &oinfo, NULL);
        if (rc) {
                CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
                RETURN(rc);
        }
        obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                      OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        RETURN(0);
}
/* this isn't where truncate starts.  roughly:
 * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
 * DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM to
 * avoid races.
 *
 * must be called under ->lli_size_sem */
void ll_truncate(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        int srvlock = test_bit(LLI_F_SRVLOCK, &lli->lli_flags);
        loff_t new_size;
        ENTRY;
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
               inode->i_generation, inode, i_size_read(inode),
               i_size_read(inode));

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
        if (lli->lli_size_sem_owner != current) {
                EXIT;
                return;
        }

        if (!lli->lli_smd) {
                CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
                       inode->i_ino);
                GOTO(out_unlock, 0);
        }

        LASSERT(atomic_read(&lli->lli_size_sem.count) <= 0);

        if (!srvlock) {
                struct ost_lvb lvb;
                int rc;

                /* XXX I'm pretty sure this is a hack to paper over a more
                 * fundamental race condition. */
                lov_stripe_lock(lli->lli_smd);
                inode_init_lvb(inode, &lvb);
                rc = obd_merge_lvb(ll_i2obdexp(inode), lli->lli_smd, &lvb, 0);
                inode->i_blocks = lvb.lvb_blocks;
                if (lvb.lvb_size == i_size_read(inode) && rc == 0) {
                        CDEBUG(D_VFSTRACE, "skipping punch for obj "LPX64", %Lu=%#Lx\n",
                               lli->lli_smd->lsm_object_id, i_size_read(inode),
                               i_size_read(inode));
                        lov_stripe_unlock(lli->lli_smd);
                        GOTO(out_unlock, 0);
                }

                obd_adjust_kms(ll_i2obdexp(inode), lli->lli_smd,
                               i_size_read(inode), 1);
                lov_stripe_unlock(lli->lli_smd);
        }

        if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
                     (i_size_read(inode) & ~CFS_PAGE_MASK))) {
                /* If the truncate leaves a partial page, update its checksum */
                struct page *page = find_get_page(inode->i_mapping,
                                                  i_size_read(inode) >>
                                                  CFS_PAGE_SHIFT);
                if (page != NULL) {
                        struct ll_async_page *llap = llap_cast_private(page);
                        if (llap != NULL) {
                                char *kaddr = kmap_atomic(page, KM_USER0);
                                llap->llap_checksum =
                                        crc32_le(0, kaddr, CFS_PAGE_SIZE);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        page_cache_release(page);
                }
        }

        new_size = i_size_read(inode);
        ll_inode_size_unlock(inode, 0);

        ll_file_punch(inode, new_size, 0);

        EXIT;
        return;

 out_unlock:
        ll_inode_size_unlock(inode, 0);
        EXIT;
}
int ll_prepare_write(struct file *file, struct page *page, unsigned from,
                     unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        obd_off offset = ((obd_off)page->index) << CFS_PAGE_SHIFT;
        struct obd_info oinfo = { { { 0 } } };
        struct brw_page pga;
        struct obdo oa;
        struct ost_lvb lvb;
        int rc = 0;
        ENTRY;

        LASSERT(PageLocked(page));
        (void)llap_cast_private(page); /* assertion */

        /* Check to see if we should return -EIO right away */
        pga.pg = page;
        pga.off = offset;
        pga.count = CFS_PAGE_SIZE;
        pga.flag = 0;

        oa.o_mode = inode->i_mode;
        oa.o_id = lsm->lsm_object_id;
        oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
        obdo_from_inode(&oa, inode, OBD_MD_FLFID | OBD_MD_FLGENER);

        oinfo.oi_oa = &oa;
        oinfo.oi_md = lsm;
        rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oinfo, 1, &pga, NULL);
        if (rc)
                RETURN(rc);

        if (PageUptodate(page)) {
                LL_CDEBUG_PAGE(D_PAGE, page, "uptodate\n");
                RETURN(0);
        }

        /* We're completely overwriting an existing page, so _don't_ set it up
         * to date until commit_write */
        if (from == 0 && to == CFS_PAGE_SIZE) {
                LL_CDEBUG_PAGE(D_PAGE, page, "full page write\n");
                POISON_PAGE(page, 0x11);
                RETURN(0);
        }

        /* If we are writing to a new page, no need to read old data.  The
         * extent locking will have updated the KMS, and for our purposes
         * here we can treat it like i_size. */
        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        lov_stripe_unlock(lsm);
        if (lvb.lvb_size <= offset) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                               lvb.lvb_size, offset);
                memset(kaddr, 0, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                GOTO(prepare_done, rc = 0);
        }

        /* XXX could be an async ocp read.. read-ahead? */
        rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);

        /* bug 1598: don't clobber blksize */
        oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
        obdo_refresh_inode(inode, &oa, oa.o_valid);

prepare_done:
        if (rc == 0)
                SetPageUptodate(page);

        RETURN(rc);
}
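/* A worked example of the KMS short-cut above (a sketch assuming 4 KB pages):
 * if the merged lvb.lvb_size (KMS) is 10000 and the write targets the page at
 * index 3 (offset 12288 >= 10000), no server data can exist there, so the
 * page is simply zero-filled and the read RPC is skipped; a write into page
 * index 2 (offset 8192 < 10000) must first read the old contents via
 * ll_brw(OBD_BRW_READ, ...). */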
static int ll_ap_make_ready(void *data, int cmd)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;

        LASSERTF(!(cmd & OBD_BRW_READ), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);

        /* we're trying to write, but the page is locked.. come back later */
        if (TryLockPage(page))
                RETURN(-EAGAIN);

        LASSERT(!PageWriteback(page));

        /* if we left PageDirty we might get another writepage call
         * in the future.  list walkers are bright enough
         * to check page dirty so we can leave it on whatever list
         * its on.  XXX also, we're called with the cli list so if
         * we got the page cache list we'd create a lock inversion
         * with the removepage path which gets the page lock then the
         * cli lock */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        clear_page_dirty(page);
#else
        LASSERTF(!PageWriteback(page), "cmd %x page %p ino %lu index %lu\n",
                 cmd, page, page->mapping->host->i_ino, page->index);
        clear_page_dirty_for_io(page);

        /* This actually clears the dirty bit in the radix tree. */
        set_page_writeback(page);
#endif

        LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
        page_cache_get(page);

        RETURN(0);
}
/* We have two reasons for giving llite the opportunity to change the
 * write length of a given queued page as it builds the RPC containing
 * the page:
 *
 * 1) Further extending writes may have landed in the page cache
 *    since a partial write first queued this page, requiring us
 *    to write more from the page cache.  (No further races are possible,
 *    since by the time this is called, the page is locked.)
 * 2) We might have raced with truncate and want to avoid performing
 *    write RPCs that are just going to be thrown away by the
 *    truncate's punch on the storage targets.
 *
 * The kms serves these purposes as it is set at both truncate and extending
 * writes. */
static int ll_ap_refresh_count(void *data, int cmd)
{
        struct ll_inode_info *lli;
        struct ll_async_page *llap;
        struct lov_stripe_md *lsm;
        struct page *page;
        struct inode *inode;
        struct ost_lvb lvb;
        __u64 kms;
        ENTRY;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(cmd != OBD_BRW_READ);

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        inode = page->mapping->host;
        lli = ll_i2info(inode);
        lsm = lli->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);

        /* catch race with truncate */
        if (((__u64)page->index << CFS_PAGE_SHIFT) >= kms)
                return 0;

        /* catch sub-page write at end of file */
        if (((__u64)page->index << CFS_PAGE_SHIFT) + CFS_PAGE_SIZE > kms)
                return kms % CFS_PAGE_SIZE;

        return CFS_PAGE_SIZE;
}
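/* Worked example (a sketch assuming 4 KB pages): with kms == 10000, a queued
 * write of page index 3 (offset 12288 >= kms) raced with truncate and is
 * trimmed to 0 bytes; page index 2 straddles kms, so 10000 % 4096 = 1808
 * bytes are written; page index 1 lies fully below kms and writes 4096. */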
void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
{
        struct lov_stripe_md *lsm;
        obd_flag valid_flags;

        lsm = ll_i2info(inode)->lli_smd;

        oa->o_id = lsm->lsm_object_id;
        oa->o_valid = OBD_MD_FLID;
        valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
        if (cmd & OBD_BRW_WRITE) {
                oa->o_valid |= OBD_MD_FLEPOCH;
                oa->o_easize = ll_i2info(inode)->lli_io_epoch;

                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                               OBD_MD_FLUID | OBD_MD_FLGID |
                               OBD_MD_FLFID | OBD_MD_FLGENER;
        }

        obdo_from_inode(oa, inode, valid_flags);
}
static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);

        EXIT;
}
static void ll_ap_update_obdo(void *data, int cmd, struct obdo *oa,
                              obd_valid valid)
{
        struct ll_async_page *llap;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        obdo_from_inode(oa, llap->llap_page->mapping->host, valid);

        EXIT;
}
static struct obd_async_page_ops ll_async_page_ops = {
        .ap_make_ready =    ll_ap_make_ready,
        .ap_refresh_count = ll_ap_refresh_count,
        .ap_fill_obdo =     ll_ap_fill_obdo,
        .ap_update_obdo =   ll_ap_update_obdo,
        .ap_completion =    ll_ap_completion,
};
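/* These callbacks are how the generic async-IO machinery calls back into
 * llite while building and completing RPCs: ap_make_ready locks the page and
 * clears its dirty bit as an RPC is assembled, ap_refresh_count re-checks how
 * many bytes of the page are still worth sending (see the kms comment above
 * ll_ap_refresh_count), ap_fill_obdo/ap_update_obdo fill in the attributes
 * that ride along with the RPC, and ap_completion runs once per page when
 * the RPC finishes. */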
struct ll_async_page *llap_cast_private(struct page *page)
{
        struct ll_async_page *llap = (struct ll_async_page *)page_private(page);

        LASSERTF(llap == NULL || llap->llap_magic == LLAP_MAGIC,
                 "page %p private %lu gave magic %d which != %d\n",
                 page, page_private(page), llap->llap_magic, LLAP_MAGIC);

        return llap;
}
/* Try to shrink the page cache for the @sbi filesystem by 1/@shrink_fraction.
 *
 * There is an llap attached onto every page in lustre, linked off @sbi.
 * We add an llap to the list so we don't lose our place during list walking.
 * If llaps in the list are being moved they will only move to the end
 * of the LRU, and we aren't terribly interested in those pages here (we
 * start at the beginning of the list where the least-used llaps are). */
int llap_shrink_cache(struct ll_sb_info *sbi, int shrink_fraction)
{
        struct ll_async_page *llap, dummy_llap = { .llap_magic = 0xd11ad11a };
        unsigned long total, want, count = 0;

        total = sbi->ll_async_page_count;

        /* There can be a large number of llaps (600k or more in a large
         * memory machine) so the VM 1/6 shrink ratio is likely too much.
         * Since we are freeing pages also, we don't necessarily want to
         * shrink so much.  Limit to 40MB of pages + llaps per call. */
        if (shrink_fraction == 0)
                want = sbi->ll_async_page_count - sbi->ll_async_page_max + 32;
        else
                want = (total + shrink_fraction - 1) / shrink_fraction;

        if (want > 40 << (20 - CFS_PAGE_SHIFT))
                want = 40 << (20 - CFS_PAGE_SHIFT);

        CDEBUG(D_CACHE, "shrinking %lu of %lu pages (1/%d)\n",
               want, total, shrink_fraction);

        spin_lock(&sbi->ll_lock);
        list_add(&dummy_llap.llap_pglist_item, &sbi->ll_pglist);

        /* total is unsigned, so test before decrementing */
        while (total-- > 0 && count < want) {
                struct page *page;
                int keep;

                if (unlikely(need_resched())) {
                        spin_unlock(&sbi->ll_lock);
                        cond_resched();
                        spin_lock(&sbi->ll_lock);
                }

                llap = llite_pglist_next_llap(sbi,&dummy_llap.llap_pglist_item);
                list_del_init(&dummy_llap.llap_pglist_item);
                if (llap == NULL)
                        break;

                page = llap->llap_page;
                LASSERT(page != NULL);

                list_add(&dummy_llap.llap_pglist_item, &llap->llap_pglist_item);

                /* Page needs/undergoing IO */
                if (TryLockPage(page)) {
                        LL_CDEBUG_PAGE(D_PAGE, page, "can't lock\n");
                        continue;
                }

                keep = (llap->llap_write_queued || PageDirty(page) ||
                        PageWriteback(page) || (!PageUptodate(page) &&
                        llap->llap_origin != LLAP_ORIGIN_READAHEAD));

                LL_CDEBUG_PAGE(D_PAGE, page,"%s LRU page: %s%s%s%s%s origin %s\n",
                               keep ? "keep" : "drop",
                               llap->llap_write_queued ? "wq " : "",
                               PageDirty(page) ? "pd " : "",
                               PageUptodate(page) ? "" : "!pu ",
                               PageWriteback(page) ? "wb" : "",
                               llap->llap_defer_uptodate ? "" : "!du",
                               llap_origins[llap->llap_origin]);

                /* If page is dirty or undergoing IO don't discard it */
                if (keep) {
                        unlock_page(page);
                        continue;
                }

                page_cache_get(page);
                spin_unlock(&sbi->ll_lock);

                if (page->mapping != NULL) {
                        ll_teardown_mmaps(page->mapping,
                                          (__u64)page->index << CFS_PAGE_SHIFT,
                                          ((__u64)page->index << CFS_PAGE_SHIFT)|
                                          ~CFS_PAGE_MASK);
                        if (!PageDirty(page) && !page_mapped(page)) {
                                ll_ra_accounting(llap, page->mapping);
                                ll_truncate_complete_page(page);
                                ++count;
                        } else {
                                LL_CDEBUG_PAGE(D_PAGE, page, "Not dropping page"
                                               " because it is %s\n",
                                               PageDirty(page) ?
                                               "dirty" : "mapped");
                        }
                }
                unlock_page(page);
                page_cache_release(page);

                spin_lock(&sbi->ll_lock);
        }
        list_del(&dummy_llap.llap_pglist_item);
        spin_unlock(&sbi->ll_lock);

        CDEBUG(D_CACHE, "shrank %lu/%lu and left %lu unscanned\n",
               count, want, total);

        return count;
}
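/* Worked example of the cap above (a sketch assuming 4 KB pages): the limit
 * 40 << (20 - CFS_PAGE_SHIFT) is 40 << 8 = 10240 pages, i.e. 40 MB per call,
 * so even a 1/6 shrink request against 600k cached llaps is clamped far
 * below the naive 100k-page target. */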
static struct ll_async_page *llap_from_page_with_lockh(struct page *page,
                                                       unsigned origin,
                                                       struct lustre_handle *lockh)
{
        struct ll_async_page *llap;
        struct obd_export *exp;
        struct inode *inode = page->mapping->host;
        struct ll_sb_info *sbi;
        int rc;
        ENTRY;

        if (!inode) {
                static int triggered;

                if (!triggered) {
                        LL_CDEBUG_PAGE(D_ERROR, page, "Bug 10047. Wrong anon "
                                       "mapping\n");
                        libcfs_debug_dumpstack(NULL);
                        triggered = 1;
                }
                RETURN(ERR_PTR(-EINVAL));
        }
        sbi = ll_i2sbi(inode);
        LASSERT(ll_async_page_slab);
        LASSERTF(origin < LLAP__ORIGIN_MAX, "%u\n", origin);

        llap = llap_cast_private(page);
        if (llap != NULL) {
                /* move to end of LRU list, except when page is just about to
                 * die */
                if (origin != LLAP_ORIGIN_REMOVEPAGE) {
                        spin_lock(&sbi->ll_lock);
                        sbi->ll_pglist_gen++;
                        list_del_init(&llap->llap_pglist_item);
                        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
                        spin_unlock(&sbi->ll_lock);
                }
                GOTO(out, llap);
        }

        exp = ll_i2obdexp(page->mapping->host);
        if (exp == NULL)
                RETURN(ERR_PTR(-EINVAL));

        /* limit the number of lustre-cached pages */
        if (sbi->ll_async_page_count >= sbi->ll_async_page_max)
                llap_shrink_cache(sbi, 0);

        OBD_SLAB_ALLOC(llap, ll_async_page_slab, CFS_ALLOC_STD,
                       ll_async_page_slab_size);
        if (llap == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        llap->llap_magic = LLAP_MAGIC;
        llap->llap_cookie = (void *)llap + size_round(sizeof(*llap));

        /* XXX: for bug 11270 - check for lockless origin here! */
        if (origin == LLAP_ORIGIN_LOCKLESS_IO)
                llap->llap_nocache = 1;

        rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd, NULL, page,
                                 (obd_off)page->index << CFS_PAGE_SHIFT,
                                 &ll_async_page_ops, llap, &llap->llap_cookie,
                                 llap->llap_nocache, lockh);
        if (rc) {
                OBD_SLAB_FREE(llap, ll_async_page_slab,
                              ll_async_page_slab_size);
                RETURN(ERR_PTR(rc));
        }

        CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
               page, llap->llap_cookie, (obd_off)page->index << CFS_PAGE_SHIFT);
        /* also zeroing the PRIVBITS low order bitflags */
        __set_page_ll_data(page, llap);
        llap->llap_page = page;

        spin_lock(&sbi->ll_lock);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count++;
        list_add_tail(&llap->llap_pglist_item, &sbi->ll_pglist);
        spin_unlock(&sbi->ll_lock);

out:
        if (unlikely(sbi->ll_flags & LL_SBI_LLITE_CHECKSUM)) {
                __u32 csum = 0;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (origin == LLAP_ORIGIN_READAHEAD ||
                    origin == LLAP_ORIGIN_READPAGE ||
                    origin == LLAP_ORIGIN_LOCKLESS_IO) {
                        llap->llap_checksum = 0;
                } else if (origin == LLAP_ORIGIN_COMMIT_WRITE ||
                           llap->llap_checksum == 0) {
                        llap->llap_checksum = csum;
                        CDEBUG(D_PAGE, "page %p cksum %x\n", page, csum);
                } else if (llap->llap_checksum == csum) {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        /* origin == LLAP_ORIGIN_WRITEPAGE */
                        LL_CDEBUG_PAGE(D_ERROR, page, "old cksum %x != new "
                                       "%x!\n", llap->llap_checksum, csum);
                }
        }

        llap->llap_origin = origin;
        RETURN(llap);
}
static inline struct ll_async_page *llap_from_page(struct page *page,
                                                   unsigned origin)
{
        return llap_from_page_with_lockh(page, origin, NULL);
}
static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                               struct ll_async_page *llap,
                               unsigned to, obd_flag async_flags)
{
        unsigned long size_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
        struct obd_io_group *oig;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, noquot = llap->llap_ignore_quota ? OBD_BRW_NOQUOTA : 0;
        ENTRY;

        /* _make_ready only sees llap once we've unlocked the page */
        llap->llap_write_queued = 1;
        rc = obd_queue_async_io(exp, ll_i2info(inode)->lli_smd, NULL,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, 0, 0, async_flags);
        if (rc == 0) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "write queued\n");
                llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;

        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index){
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum = 0;
                struct page *page = llap->llap_page;
                char *kaddr = kmap_atomic(page, KM_USER0);
                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page)) {
                        end_page_writeback(llap->llap_page);
                }
        }

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

free_oig:
        oig_release(oig);
out:
        RETURN(rc);
}
/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk. */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        struct lustre_handle *lockh = NULL;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
                lockh = &fd->fd_cwlockh;

        llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_COMMIT_WRITE, lockh);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set page dirty when it's queued to be write out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

out:
        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* this page beyond the pales of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking. we must
                 * teardown our book-keeping here. */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}
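/* Worked example of the KMS/i_size update above (a sketch assuming 4 KB
 * pages): committing bytes [0, 1808) of page index 2 yields
 * size = (2 << 12) + 1808 = 10000; if that exceeds the cached i_size, the
 * client grows both the KMS (known minimum size) and i_size to 10000 under
 * lli_size_sem, so later readers and ll_ap_refresh_count see the new EOF. */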
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}
static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}
/* called for each page in a completed rpc.*/
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page,cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                }
                SetPageError(page);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
                if (rc == -ENOSPC)
                        set_bit(AS_ENOSPC, &page->mapping->flags);
                else
                        set_bit(AS_EIO, &page->mapping->flags);
#else
                page->mapping->gfp_mask |= AS_EIO_MASK;
#endif
        }

        unlock_page(page);

        if (cmd & OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        RETURN(rc);
}
static void __ll_put_llap(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        exp = ll_i2obdexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback..*/
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}
/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct ll_async_page *llap = llap_cast_private(page);
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LASSERT(!llap->llap_lockless_io_page);
        LASSERT(!llap->llap_nocache);

        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
        __ll_put_llap(page);

        EXIT;
}
static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;
        ENTRY;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
                                ASYNC_READY | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}
static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}
static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}
void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}
#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu\n", \
               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
               ras->ras_consecutive_pages, ras->ras_window_start,            \
               ras->ras_window_len, ras->ras_next_readahead,                 \
               ras->ras_requests, ras->ras_request_index);
static int index_in_window(unsigned long index, unsigned long point,
                           unsigned long before, unsigned long after)
{
        unsigned long start = point - before, end = point + after;

        /* clamp the window on unsigned wrap-around at either end */
        if (start > point)
                start = 0;
        if (end < point)
                end = ~0;

        return start <= index && index <= end;
}
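/* For example, index_in_window(3, 5, 8, 8) must not reject index 3 just
 * because 5 - 8 wraps to a huge unsigned value; the clamp turns the window
 * into [0, 13], so nearby indices on either side of "point" still match. */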
static struct ll_readahead_state *ll_ras_get(struct file *f)
{
        struct ll_file_data *fd;

        fd = LUSTRE_FPRIVATE(f);
        return &fd->fd_ras;
}
void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        ras->ras_requests++;
        ras->ras_request_index = 0;
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;

        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}
void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
        struct ll_readahead_state *ras;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}
static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
        struct ll_ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
        return NULL;
}
struct ll_ra_read *ll_ra_read_get(struct file *f)
{
        struct ll_readahead_state *ras;
        struct ll_ra_read *bead;

        ras = ll_ras_get(f);

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        spin_unlock(&ras->ras_lock);
        return bead;
}
static int ll_readahead(struct ll_readahead_state *ras,
                        struct obd_export *exp, struct address_space *mapping,
                        struct obd_io_group *oig, int flags)
{
        unsigned long i, start = 0, end = 0, reserved;
        struct ll_async_page *llap;
        struct page *page;
        int rc, ret = 0, match_failed = 0;
        __u64 kms;
        unsigned int gfp_mask;
        struct inode *inode;
        struct lov_stripe_md *lsm;
        struct ll_ra_read *bead;
        struct ost_lvb lvb;
        ENTRY;

        inode = mapping->host;
        lsm = ll_i2info(inode)->lli_smd;

        lov_stripe_lock(lsm);
        inode_init_lvb(inode, &lvb);
        obd_merge_lvb(ll_i2obdexp(inode), lsm, &lvb, 1);
        kms = lvb.lvb_size;
        lov_stripe_unlock(lsm);
        if (kms == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
                RETURN(0);
        }

        spin_lock(&ras->ras_lock);
        bead = ll_ra_read_get_locked(ras);
        /* Enlarge the RA window to encompass the full read */
        if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
            bead->lrr_start + bead->lrr_count) {
                ras->ras_window_len = bead->lrr_start + bead->lrr_count -
                                      ras->ras_window_start;
        }
        /* Reserve a part of the read-ahead window that we'll be issuing */
        if (ras->ras_window_len) {
                start = ras->ras_next_readahead;
                end = ras->ras_window_start + ras->ras_window_len - 1;
        }
        if (end != 0) {
                /* Truncate RA window to end of file */
                end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
        }
        spin_unlock(&ras->ras_lock);

        if (end == 0) {
                ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
                RETURN(0);
        }

        reserved = ll_ra_count_get(ll_i2sbi(inode), end - start + 1);
        if (reserved < end - start + 1)
                ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

        gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
        gfp_mask |= __GFP_NOWARN;
#endif

        for (i = start; reserved > 0 && !match_failed && i <= end; i++) {
                /* skip locked pages from previous readpage calls */
                page = grab_cache_page_nowait_gfp(mapping, i, gfp_mask);
                if (page == NULL) {
                        ll_ra_stats_inc(mapping, RA_STAT_FAILED_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n failed\n");
                        continue;
                }

                /* Check if page was truncated or reclaimed */
                if (page->mapping != mapping) {
                        ll_ra_stats_inc(mapping, RA_STAT_WRONG_GRAB_PAGE);
                        CDEBUG(D_READA, "g_c_p_n returned invalid page\n");
                        goto next_page;
                }

                /* we do this first so that we can see the page in the /proc
                 * accounting */
                llap = llap_from_page(page, LLAP_ORIGIN_READAHEAD);
                if (IS_ERR(llap) || llap->llap_defer_uptodate) {
                        if (PTR_ERR(llap) == -ENOLCK) {
                                ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
                                match_failed = 1;
                                CDEBUG(D_READA | D_PAGE,
                                       "Adding page to cache failed index "
                                       "%lu\n", i);
                        }
                        goto next_page;
                }

                /* skip completed pages */
                if (Page_Uptodate(page))
                        goto next_page;

                /* bail when we hit the end of the lock. */
                rc = ll_issue_page_read(exp, llap, oig, 1);
                if (rc == 0) {
                        reserved--;
                        ret++;
                        LL_CDEBUG_PAGE(D_READA| D_PAGE, page,
                                       "started read-ahead\n");
                }
                if (rc) {
        next_page:
                        LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
                                       "skipping read-ahead\n");

                        unlock_page(page);
                }
                page_cache_release(page);
        }

        LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
        if (i == end + 1 && end == (kms >> CFS_PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);

        /* if we didn't get to the end of the region we reserved from
         * the ras we need to go back and update the ras so that the
         * next read-ahead tries from where we left off.  we only do so
         * if the region we failed to issue read-ahead on is still ahead
         * of the app and behind the next index to start read-ahead from */
        if (i != end + 1) {
                spin_lock(&ras->ras_lock);
                if (i < ras->ras_next_readahead &&
                    index_in_window(i, ras->ras_window_start, 0,
                                    ras->ras_window_len)) {
                        ras->ras_next_readahead = i;
                        RAS_CDEBUG(ras);
                }
                spin_unlock(&ras->ras_lock);
        }

        RETURN(ret);
}
static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_window_start = index & (~((1024 * 1024 >> CFS_PAGE_SHIFT) - 1));
}
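/* The window start is aligned down to a 1 MB boundary; e.g. with 4 KB pages
 * (CFS_PAGE_SHIFT == 12) the mask covers 256 pages, so index 300 yields
 * ras_window_start = 300 & ~255 = 256. */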
/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
{
        ras->ras_last_readpage = index;
        ras->ras_consecutive_requests = 0;
        ras->ras_consecutive_pages = 0;
        ras->ras_window_len = 0;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start, index);

        RAS_CDEBUG(ras);
}
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
        spin_lock_init(&ras->ras_lock);
        ras_reset(ras, 0);
        ras->ras_requests = 0;
        INIT_LIST_HEAD(&ras->ras_read_beads);
}
static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                       struct ll_readahead_state *ras, unsigned long index,
                       unsigned hit)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        int zero = 0;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        spin_lock(&ras->ras_lock);

        ll_ra_stats_inc_unlocked(ra, hit ? RA_STAT_HIT : RA_STAT_MISS);

        /* reset the read-ahead window in two cases.  First when the app seeks
         * or reads to some other part of the file.  Secondly if we get a
         * read-ahead miss that we think we've previously issued.  This can
         * be a symptom of there being so many read-ahead pages that the VM is
         * reclaiming it before we get to it. */
        if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_DISTANT_READPAGE);
        } else if (!hit && ras->ras_window_len &&
                   index < ras->ras_next_readahead &&
                   index_in_window(index, ras->ras_window_start, 0,
                                   ras->ras_window_len)) {
                zero = 1;
                ll_ra_stats_inc_unlocked(ra, RA_STAT_MISS_IN_WINDOW);
        }

        /* On the second access to a file smaller than the tunable
         * ra_max_read_ahead_whole_pages trigger RA on all pages in the
         * file up to ra_max_pages.  This is simply a best effort and
         * only occurs once per open file.  Normal RA behavior is reverted
         * to for subsequent IO.  The mmap case does not increment
         * ras_requests and thus can never trigger this behavior. */
        if (ras->ras_requests == 2 && !ras->ras_request_index) {
                __u64 kms_pages;

                kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
                            CFS_PAGE_SHIFT;

                CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
                       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages);

                if (kms_pages &&
                    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
                        ras->ras_window_start = 0;
                        ras->ras_last_readpage = 0;
                        ras->ras_next_readahead = 0;
                        ras->ras_window_len = min(ra->ra_max_pages,
                                ra->ra_max_read_ahead_whole_pages);
                        GOTO(out_unlock, 0);
                }
        }

        if (zero) {
                ras_reset(ras, index);
                GOTO(out_unlock, 0);
        }

        ras->ras_last_readpage = index;
        ras->ras_consecutive_pages++;
        ras_set_start(ras, index);
        ras->ras_next_readahead = max(ras->ras_window_start,
                                      ras->ras_next_readahead);

        /* Trigger RA in the mmap case where ras_consecutive_requests
         * is not incremented and thus can't be used to trigger RA */
        if (!ras->ras_window_len && ras->ras_consecutive_pages == 3) {
                ras->ras_window_len = 1024 * 1024 >> CFS_PAGE_SHIFT;
                GOTO(out_unlock, 0);
        }

        /* The initial ras_window_len is set to the request size.  To avoid
         * uselessly reading and discarding pages for random IO the window is
         * only increased once per consecutive request received. */
        if (ras->ras_consecutive_requests > 1 && !ras->ras_request_index) {
                ras->ras_window_len = min(ras->ras_window_len +
                                          (1024 * 1024 >> CFS_PAGE_SHIFT),
                                          ra->ra_max_pages);
        }

        EXIT;
out_unlock:
        RAS_CDEBUG(ras);
        ras->ras_request_index++;
        spin_unlock(&ras->ras_lock);
        spin_unlock(&sbi->ll_lock);
        return;
}
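/* Worked example of the window growth above (a sketch assuming 4 KB pages):
 * each consecutive request grows ras_window_len by 1 MB (256 pages), so a
 * sequential reader ramps 256 -> 512 -> 768 ... pages until capped at
 * ra_max_pages, while a random reader keeps resetting the window to zero. */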
int ll_writepage(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        struct ll_async_page *llap;
        int rc = 0;
        ENTRY;

        LASSERT(!PageDirty(page));
        LASSERT(PageLocked(page));

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        llap = llap_from_page(page, LLAP_ORIGIN_WRITEPAGE);
        if (IS_ERR(llap))
                GOTO(out, rc = PTR_ERR(llap));

        LASSERT(!llap->llap_nocache);
        LASSERT(!PageWriteback(page));
        set_page_writeback(page);

        page_cache_get(page);
        if (llap->llap_write_queued) {
                LL_CDEBUG_PAGE(D_PAGE, page, "marking urgent\n");
                rc = obd_set_async_flags(exp, lli->lli_smd, NULL,
                                         llap->llap_cookie,
                                         ASYNC_READY | ASYNC_URGENT);
        } else {
                rc = queue_or_sync_write(exp, inode, llap, CFS_PAGE_SIZE,
                                         ASYNC_READY | ASYNC_URGENT);
        }
        if (rc)
                page_cache_release(page);
out:
        if (rc) {
                if (!lli->lli_async_rc)
                        lli->lli_async_rc = rc;
                /* re-dirty page on error so it retries write */
                if (PageWriteback(page)) {
                        end_page_writeback(page);
                }
                /* resend page only for not started IO */
                if (!PageError(page))
                        ll_redirty_page(page);
                unlock_page(page);
        }
        RETURN(rc);
}
/* for now we do our readpage the same on both 2.4 and 2.5.  The kernel's
 * read-ahead assumes it is valid to issue readpage all the way up to
 * i_size, but our dlm locks make that not the case.  We disable the
 * kernel's read-ahead and do our own by walking ahead in the page cache
 * checking for dlm lock coverage.  the main difference between 2.4 and
 * 2.6 is how read-ahead gets batched and issued, but we're using our own,
 * so they look the same.
 */
int ll_readpage(struct file *filp, struct page *page)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct obd_io_group *oig = NULL;
        struct lustre_handle *lockh = NULL;
        int rc;
        ENTRY;

        LASSERT(PageLocked(page));
        LASSERT(!PageUptodate(page));
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset=%Lu=%#Lx\n",
               inode->i_ino, inode->i_generation, inode,
               (((loff_t)page->index) << CFS_PAGE_SHIFT),
               (((loff_t)page->index) << CFS_PAGE_SHIFT));
        LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);

        if (!ll_i2info(inode)->lli_smd) {
                /* File with no objects - one big hole */
                /* We use this just for remove_from_page_cache that is not
                 * exported, we'd make page back up to date. */
                ll_truncate_complete_page(page);
                clear_page(kmap(page));
                kunmap(page);
                SetPageUptodate(page);
                unlock_page(page);
                RETURN(0);
        }

        rc = oig_init(&oig);
        if (rc < 0)
                GOTO(out, rc);

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                GOTO(out, rc = -EINVAL);

        if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
                lockh = &fd->fd_cwlockh;

        llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_READPAGE, lockh);
        if (IS_ERR(llap)) {
                if (PTR_ERR(llap) == -ENOLCK) {
                        CWARN("ino %lu page %lu (%llu) not covered by "
                              "a lock (mmap?).  check debug logs.\n",
                              inode->i_ino, page->index,
                              (long long)page->index << PAGE_CACHE_SHIFT);
                }
                GOTO(out, rc = PTR_ERR(llap));
        }

        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ras_update(ll_i2sbi(inode), inode, &fd->fd_ras, page->index,
                           llap->llap_defer_uptodate);

        if (llap->llap_defer_uptodate) {
                /* This is the callpath if we got the page from a readahead */
                llap->llap_ra_used = 1;
                rc = ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                                  fd->fd_flags);
                if (rc > 0)
                        obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd,
                                             NULL, oig);
                LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
                SetPageUptodate(page);
                unlock_page(page);
                GOTO(out_oig, rc = 0);
        }

        rc = ll_issue_page_read(exp, llap, oig, 0);
        if (rc)
                GOTO(out, rc);

        LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
        /* We have just requested the actual page we want, see if we can tack
         * on some readahead to that page's RPC before it is sent. */
        if (ll_i2sbi(inode)->ll_ra_info.ra_max_pages)
                ll_readahead(&fd->fd_ras, exp, page->mapping, oig,
                             fd->fd_flags);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);

out:
        if (rc)
                unlock_page(page);
out_oig:
        if (oig != NULL)
                oig_release(oig);
        RETURN(rc);
}
static void ll_file_put_pages(struct page **pages, int numpages)
{
        int i;
        struct page **pp;
        ENTRY;

        for (i = 0, pp = pages; i < numpages; i++, pp++) {
                if (*pp) {
                        LL_CDEBUG_PAGE(D_PAGE, (*pp), "free\n");
                        __ll_put_llap(*pp);
                        if (page_private(*pp))
                                CERROR("the llap wasn't freed\n");
                        (*pp)->mapping = NULL;
                        if (page_count(*pp) != 1)
                                CERROR("page %p, flags %#lx, count %i, private %p\n",
                                       (*pp), (unsigned long)(*pp)->flags,
                                       page_count(*pp), (void*)page_private(*pp));
                        __free_pages(*pp, 0);
                }
        }
        OBD_FREE(pages, numpages * sizeof(struct page*));
        EXIT;
}
static struct page **ll_file_prepare_pages(int numpages, struct inode *inode,
                                           unsigned long first)
{
        struct page **pages;
        int i;
        int rc = 0;
        ENTRY;

        OBD_ALLOC(pages, sizeof(struct page *) * numpages);
        if (pages == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        for (i = 0; i < numpages; i++) {
                struct page *page;
                struct ll_async_page *llap;

                page = alloc_pages(GFP_HIGHUSER, 0);
                if (page == NULL)
                        GOTO(err, rc = -ENOMEM);
                pages[i] = page;
                /* llap_from_page needs page index and mapping to be set */
                page->index = first++;
                page->mapping = inode->i_mapping;
                llap = llap_from_page(page, LLAP_ORIGIN_LOCKLESS_IO);
                if (IS_ERR(llap))
                        GOTO(err, rc = PTR_ERR(llap));
                llap->llap_lockless_io_page = 1;
        }
        RETURN(pages);
err:
        ll_file_put_pages(pages, numpages);
        RETURN(ERR_PTR(rc));
}
static ssize_t ll_file_copy_pages(struct page **pages, int numpages,
                                  char *buf, loff_t pos, size_t count,
                                  int rw)
{
        ssize_t amount = 0;
        int i;
        int updatechecksum = ll_i2sbi(pages[0]->mapping->host)->ll_flags &
                             LL_SBI_LLITE_CHECKSUM;
        ENTRY;

        for (i = 0; i < numpages; i++) {
                unsigned offset, bytes, left;
                char *vaddr;

                vaddr = kmap(pages[i]);
                offset = pos & (CFS_PAGE_SIZE - 1);
                bytes = min_t(unsigned, CFS_PAGE_SIZE - offset, count);
                LL_CDEBUG_PAGE(D_PAGE, pages[i], "op = %s, addr = %p, "
                               "buf = %p, bytes = %u\n",
                               (rw == WRITE) ? "CFU" : "CTU",
                               vaddr + offset, buf, bytes);
                if (rw == WRITE) {
                        left = copy_from_user(vaddr + offset, buf, bytes);
                        if (updatechecksum) {
                                struct ll_async_page *llap;

                                llap = llap_cast_private(pages[i]);
                                llap->llap_checksum = crc32_le(0, vaddr,
                                                               CFS_PAGE_SIZE);
                        }
                } else {
                        left = copy_to_user(buf, vaddr + offset, bytes);
                }
                kunmap(pages[i]);
                amount += bytes;
                if (left) {
                        amount -= left;
                        break;
                }
                buf += bytes;
                count -= bytes;
                pos += bytes;
        }
        if (amount == 0)
                RETURN(-EFAULT);
        RETURN(amount);
}
static int ll_file_oig_pages(struct inode * inode, struct page **pages,
                             int numpages, loff_t pos, size_t count, int rw)
{
        struct obd_io_group *oig;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_export *exp;
        loff_t org_pos = pos;
        obd_flag brw_flags;
        int rc;
        int i;
        ENTRY;

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);
        rc = oig_init(&oig);
        if (rc)
                RETURN(rc);
        brw_flags = OBD_BRW_SRVLOCK;
        if (capable(CAP_SYS_RESOURCE))
                brw_flags |= OBD_BRW_NOQUOTA;

        for (i = 0; i < numpages; i++) {
                struct ll_async_page *llap;
                unsigned from, bytes;

                from = pos & (CFS_PAGE_SIZE - 1);
                bytes = min_t(unsigned, CFS_PAGE_SIZE - from,
                              count - pos + org_pos);
                llap = llap_cast_private(pages[i]);
                LASSERT(llap);

                lock_page(pages[i]);

                LL_CDEBUG_PAGE(D_PAGE, pages[i], "offset "LPU64","
                               " from %u, bytes = %u\n",
                               (__u64)pos, from, bytes);
                LASSERTF(pos >> CFS_PAGE_SHIFT == pages[i]->index,
                         "wrong page index %lu (%lu)\n",
                         pages[i]->index,
                         (unsigned long)(pos >> CFS_PAGE_SHIFT));
                rc = obd_queue_group_io(exp, lli->lli_smd, NULL, oig,
                                        llap->llap_cookie,
                                        (rw == WRITE) ?
                                        OBD_BRW_WRITE : OBD_BRW_READ,
                                        from, bytes, brw_flags,
                                        ASYNC_READY | ASYNC_URGENT |
                                        ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
                if (rc) {
                        i++;
                        GOTO(out, rc);
                }
                pos += bytes;
        }
        rc = obd_trigger_group_io(exp, lli->lli_smd, NULL, oig);
        if (rc)
                GOTO(out, rc);
        rc = oig_wait(oig);
out:
        while (--i >= 0)
                unlock_page(pages[i]);
        oig_release(oig);
        RETURN(rc);
}
ssize_t ll_file_lockless_io(struct file *file, char *buf, size_t count,
                            loff_t *ppos, int rw)
{
        loff_t pos;
        struct inode *inode = file->f_dentry->d_inode;
        ssize_t rc = 0;
        int max_pages;
        size_t amount = 0;
        unsigned long first, last;
        ENTRY;

        if (rw == READ) {
                loff_t isize;

                ll_inode_size_lock(inode, 0);
                isize = i_size_read(inode);
                ll_inode_size_unlock(inode, 0);
                if (*ppos >= isize)
                        GOTO(out, rc = 0);
                if (*ppos + count >= isize)
                        count -= *ppos + count - isize;
                if (count == 0)
                        GOTO(out, rc);
        } else {
                rc = generic_write_checks(file, ppos, &count, 0);
                if (rc)
                        GOTO(out, rc);
                rc = remove_suid(file->f_dentry);
                if (rc)
                        GOTO(out, rc);
        }

        pos = *ppos;
        first = pos >> CFS_PAGE_SHIFT;
        last = (pos + count - 1) >> CFS_PAGE_SHIFT;
        max_pages = PTLRPC_MAX_BRW_PAGES *
                    ll_i2info(inode)->lli_smd->lsm_stripe_count;
        CDEBUG(D_INFO, "%u, stripe_count = %u\n",
               PTLRPC_MAX_BRW_PAGES /* max_pages_per_rpc */,
               ll_i2info(inode)->lli_smd->lsm_stripe_count);

        while (first <= last && rc >= 0) {
                int pages_for_io;
                struct page **pages;
                size_t bytes = count - amount;

                pages_for_io = min_t(int, last - first + 1, max_pages);
                pages = ll_file_prepare_pages(pages_for_io, inode, first);
                if (IS_ERR(pages)) {
                        rc = PTR_ERR(pages);
                        break;
                }
                if (rw == WRITE) {
                        rc = ll_file_copy_pages(pages, pages_for_io, buf,
                                                pos + amount, bytes, rw);
                        if (rc < 0)
                                GOTO(put_pages, rc);
                        bytes = rc;
                }
                rc = ll_file_oig_pages(inode, pages, pages_for_io,
                                       pos + amount, bytes, rw);
                if (rc)
                        GOTO(put_pages, rc);
                if (rw == READ) {
                        rc = ll_file_copy_pages(pages, pages_for_io, buf,
                                                pos + amount, bytes, rw);
                        if (rc < 0)
                                GOTO(put_pages, rc);
                        bytes = rc;
                }
                amount += bytes;
                buf += bytes;
put_pages:
                ll_file_put_pages(pages, pages_for_io);
                first += pages_for_io;
                /* a short read/write check */
                if (pos + amount < ((loff_t)first << CFS_PAGE_SHIFT))
                        break;
        }

        /* NOTE: don't update i_size and KMS in absence of LDLM locks even
         * if the write makes the file larger */
        file_accessed(file);
        if (rw == READ && amount < count && rc == 0) {
                unsigned long not_cleared;

                not_cleared = clear_user(buf, count - amount);
                amount = count - not_cleared;
                if (not_cleared)
                        rc = -EFAULT;
        }
        if (amount > 0) {
                lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
                                    (rw == WRITE) ?
                                    LPROC_LL_LOCKLESS_WRITE :
                                    LPROC_LL_LOCKLESS_READ,
                                    (long)amount);
                *ppos += amount;
                rc = amount;
        }

out:
        RETURN(rc);
}
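/* End-to-end sketch of the lockless path above (assumptions: 4 KB pages and
 * whatever PTLRPC_MAX_BRW_PAGES and the file's stripe count are configured
 * to): a 1 MB read at pos 0 spans pages [0, 255]; each loop iteration
 * allocates up to max_pages temporary pages outside the page cache, queues
 * them as one OBD_BRW_SRVLOCK group IO (the server takes the extent lock
 * instead of the client), copies the data to userspace, and frees the pages
 * again, so no LDLM lock or cached state is left behind on the client. */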