* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <obd.h>
-#include <lustre_lite.h>
-
+#include "llite_internal.h"
#include "vvp_internal.h"
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice);
-
/**
* True, if \a io is a normal io, False for splice_{read,write}
*/
struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *cio = vvp_env_io(env);
bool rc = true;
switch (io->ci_type) {
static int vvp_io_write_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *cio = cl2vvp_io(env, ios);
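+
+ /* Pages to be committed are queued here and flushed in
+ * batches by vvp_io_write_commit(). */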
cl_page_list_init(&cio->u.write.cui_queue);
cio->u.write.cui_written = 0;
static void vvp_io_write_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *cio = cl2vvp_io(env, ios);
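+
+ /* Every queued page must have been committed or discarded by now. */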
LASSERT(cio->u.write.cui_queue.pl_nr == 0);
}
static int vvp_io_fault_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
- LASSERT(inode ==
- cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
- vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
- return 0;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+
+ LASSERT(inode == vio->cui_fd->fd_file->f_dentry->d_inode);
+ vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
+ return 0;
}
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct inode *inode = ccc_object_inode(obj);
+ struct vvp_io *cio = cl2vvp_io(env, ios);
+ struct inode *inode = vvp_object_inode(obj);
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
"restore needed %d\n",
/* today successful restore is the only possible
* case */
/* restore was done, clear restoring state */
- ll_i2info(ccc_object_inode(obj))->lli_flags &=
+ ll_i2info(vvp_object_inode(obj))->lli_flags &=
~LLIF_FILE_RESTORING;
}
}
struct cl_io *io = ios->cis_io;
struct cl_page *page = io->u.ci_fault.ft_page;
- CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
+ CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
if (page != NULL) {
lu_ref_del(&page->cp_reference, "fault", io);
}
static int vvp_mmap_locks(const struct lu_env *env,
- struct ccc_io *vio, struct cl_io *io)
+ struct vvp_io *vio, struct cl_io *io)
{
struct ccc_thread_info *cti = ccc_env_info(env);
struct mm_struct *mm = current->mm;
unsigned long addr;
unsigned long seg;
ssize_t count;
- int result;
+ int result = 0;
ENTRY;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
struct inode *inode = vma->vm_file->f_dentry->d_inode;
int flags = CEF_MUST;
- if (ll_file_nolock(vma->vm_file)) {
- /*
- * For no lock case, a lockless lock will be
- * generated.
- */
- flags = CEF_NEVER;
- }
+ if (ll_file_nolock(vma->vm_file)) {
+ /*
+ * mmapped IO is not allowed on "nolock" files;
+ * fail instead of generating a lockless lock.
+ */
+ result = -EINVAL;
+ break;
+ }
/*
* XXX: Required lock mode can be weakened: CIT_WRITE
descr->cld_mode, descr->cld_start,
descr->cld_end);
- if (result < 0) {
- up_read(&mm->mmap_sem);
- RETURN(result);
- }
+ if (result < 0)
+ break;
- if (vma->vm_end - addr >= count)
- break;
+ if (vma->vm_end - addr >= count)
+ break;
- count -= vma->vm_end - addr;
- addr = vma->vm_end;
- }
- up_read(&mm->mmap_sem);
- }
- RETURN(0);
+ count -= vma->vm_end - addr;
+ addr = vma->vm_end;
+ }
+ up_read(&mm->mmap_sem);
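+ /* An error from the vma scan above ends the whole iov walk. */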
+ if (result < 0)
+ break;
+ }
+ RETURN(result);
}
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
enum cl_lock_mode mode, loff_t start, loff_t end)
{
- struct ccc_io *cio = ccc_env_io(env);
- int result;
- int ast_flags = 0;
+ struct vvp_io *cio = vvp_env_io(env);
+ int result;
+ int ast_flags = 0;
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ENTRY;
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+ ENTRY;
- ccc_io_update_iov(env, cio, io);
+ vvp_io_update_iov(env, cio, io);
+
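+ /* A non-blocking IO request becomes a non-blocking lock enqueue. */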
+ if (io->u.ci_rw.crw_nonblock)
+ ast_flags |= CEF_NONBLOCK;
+
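+ /* First lock any Lustre-backed regions of the user buffer that
+ * are mmapped into this address space, then lock the IO extent
+ * itself. */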
+ result = vvp_mmap_locks(env, cio, io);
+ if (result == 0)
+ result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
- if (io->u.ci_rw.crw_nonblock)
- ast_flags |= CEF_NONBLOCK;
- result = vvp_mmap_locks(env, cio, io);
- if (result == 0)
- result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
RETURN(result);
}
static int vvp_io_read_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
+ struct cl_io *io = ios->cis_io;
struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
int result;
/*
* XXX LDLM_FL_CBPENDING
*/
- return ccc_io_one_lock_index
- (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
- io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
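+ /* A fault locks just the single page index being faulted in. */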
+ return vvp_io_one_lock_index(env,
+ io, 0,
+ vvp_mode_from_vma(vio->u.fault.ft_vma),
+ io->u.ci_fault.ft_index,
+ io->u.ci_fault.ft_index);
}
static int vvp_io_write_lock(const struct lu_env *env,
static int vvp_io_setattr_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *cio = vvp_env_io(env);
struct cl_io *io = ios->cis_io;
__u64 new_size;
__u32 enqflags = 0;
new_size = 0;
}
cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
- return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
- new_size, OBD_OBJECT_EOF);
+
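+ /* The size change is covered by a write lock from new_size through EOF. */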
+ return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
+ new_size, OBD_OBJECT_EOF);
}
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
valid |= CAT_MTIME;
}
- result = cl_object_attr_set(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
+ result = cl_object_attr_update(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
- return result;
+ return result;
}
static int vvp_io_setattr_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct inode *inode = vvp_object_inode(io->ci_obj);
int result = 0;
mutex_lock(&inode->i_mutex);
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct inode *inode = vvp_object_inode(io->ci_obj);
if (cl_io_is_trunc(io)) {
/* Truncate in memory pages - they must be clean pages
}
static int vvp_io_read_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_ra_read *bead = &vio->cui_bead;
- struct file *file = cio->cui_fd->fd_file;
-
- int result;
- loff_t pos = io->u.ci_rd.rd.crw_pos;
- long cnt = io->u.ci_rd.rd.crw_count;
- long tot = cio->cui_tot_count;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct file *file = vio->cui_fd->fd_file;
+
+ int result;
+ loff_t pos = io->u.ci_rd.rd.crw_pos;
+ long cnt = io->u.ci_rd.rd.crw_count;
+ long tot = vio->cui_tot_count;
int exceed = 0;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
"Read ino %lu, %lu bytes, offset %lld, size %llu\n",
inode->i_ino, cnt, pos, i_size_read(inode));
- /* turn off the kernel's read-ahead */
- cio->cui_fd->fd_file->f_ra.ra_pages = 0;
+ /* turn off the kernel's read-ahead */
+ vio->cui_fd->fd_file->f_ra.ra_pages = 0;
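+ /* Lustre does its own read-ahead (see ll_readahead() in the
+ * read page path). */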
- /* initialize read-ahead window once per syscall */
- if (!vio->cui_ra_window_set) {
- vio->cui_ra_window_set = 1;
- bead->lrr_start = cl_index(obj, pos);
- bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
- ll_ra_read_in(file, bead);
- }
+ /* initialize read-ahead window once per syscall */
+ if (!vio->cui_ra_valid) {
+ vio->cui_ra_valid = true;
+ vio->cui_ra_start = cl_index(obj, pos);
+ vio->cui_ra_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
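+ /* The window covers the pages spanned by the whole syscall,
+ * rounded up to a full page. */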
+ ll_ras_enter(file);
+ }
/* BUG: 5972 */
file_accessed(file);
switch (vio->cui_io_subtype) {
case IO_NORMAL:
- LASSERT(cio->cui_iocb->ki_pos == pos);
- result = generic_file_aio_read(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- cio->cui_iocb->ki_pos);
+ LASSERT(vio->cui_iocb->ki_pos == pos);
+ result = generic_file_aio_read(vio->cui_iocb,
+ vio->cui_iov, vio->cui_nrsegs,
+ vio->cui_iocb->ki_pos);
break;
- case IO_SPLICE:
- result = generic_file_splice_read(file, &pos,
- vio->u.splice.cui_pipe, cnt,
- vio->u.splice.cui_flags);
- /* LU-1109: do splice read stripe by stripe otherwise if it
- * may make nfsd stuck if this read occupied all internal pipe
- * buffers. */
- io->ci_continue = 0;
- break;
- default:
- CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
- LBUG();
- }
+ case IO_SPLICE:
+ result = generic_file_splice_read(file, &pos,
+ vio->u.splice.cui_pipe, cnt,
+ vio->u.splice.cui_flags);
+ /* LU-1109: do splice read stripe by stripe, otherwise it
+ * may make nfsd stuck if this read occupies all internal
+ * pipe buffers. */
+ io->ci_continue = 0;
+ break;
+ default:
+ CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
+ LBUG();
+ }
out:
if (result >= 0) {
if (result < cnt)
io->ci_continue = 0;
io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd,
+ ll_rw_stats_tally(ll_i2sbi(inode), current->pid, vio->cui_fd,
pos, result, READ);
result = 0;
}
return result;
}
-static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
-
- if (vio->cui_ra_window_set)
- ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
-
- vvp_io_fini(env, ios);
-}
-
static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *plist, int from, int to)
{
page = cl_page_list_first(plist);
if (plist->pl_nr == 1) {
cl_page_clip(env, page, from, to);
- } else if (from > 0) {
- cl_page_clip(env, page, from, PAGE_SIZE);
} else {
- page = cl_page_list_last(plist);
- cl_page_clip(env, page, 0, to);
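+ /* A partial first page and a partial last page are
+ * clipped independently. */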
+ if (from > 0)
+ cl_page_clip(env, page, from, PAGE_SIZE);
+ if (to != PAGE_SIZE) {
+ page = cl_page_list_last(plist);
+ cl_page_clip(env, page, 0, to);
+ }
}
}
static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
struct cl_page *page)
{
- struct ccc_page *cp;
+ struct vvp_page *vpg;
struct page *vmpage = page->cp_vmpage;
struct cl_object *clob = cl_io_top(io)->ci_obj;
SetPageUptodate(vmpage);
set_page_dirty(vmpage);
- cp = cl2ccc_page(cl_object_page_slice(clob, page));
- vvp_write_pending(cl2ccc(clob), cp);
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ vvp_write_pending(cl2vvp(clob), vpg);
cl_page_disown(env, io, page);
/* held in ll_cl_init() */
- lu_ref_del(&page->cp_reference, "cl_io", io);
+ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
cl_page_put(env, page);
}
pgoff_t index = CL_PAGE_EOF;
cl_page_list_for_each(page, plist) {
- struct ccc_page *cp = cl_object_page_slice(obj, page);
+ struct vvp_page *vpg = cl2vvp_page(cl_object_page_slice(obj, page));
if (index == CL_PAGE_EOF) {
- index = ccc_index(cp);
+ index = vvp_index(vpg);
continue;
}
++index;
- if (index == ccc_index(cp))
+ if (index == vvp_index(vpg))
continue;
return false;
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
{
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct ccc_io *cio = ccc_env_io(env);
+ struct inode *inode = vvp_object_inode(obj);
+ struct vvp_io *cio = vvp_env_io(env);
struct cl_page_list *queue = &cio->u.write.cui_queue;
struct cl_page *page;
int rc = 0;
}
/* update inode size */
- ll_merge_lvb(env, inode);
+ ll_merge_attr(env, inode);
/* Now the pages in queue were failed to commit, discard them
* unless they were dirtied before. */
static int vvp_io_write_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *cio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
ssize_t result = 0;
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
* PARALLEL IO This has to be changed for parallel IO doing
* out-of-order writes.
*/
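+ /* Refresh the cached inode size before using it as the
+ * append position. */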
+ ll_merge_attr(env, inode);
pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
cio->cui_iocb->ki_pos = pos;
} else {
LASSERT(cio->cui_iocb->ki_pos == pos);
}
- CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+ CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
- result = 0;
- else
- result = generic_file_aio_write(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- cio->cui_iocb->ki_pos);
+ if (cio->cui_iov == NULL) {
+ /* from a temp io in ll_cl_init(). */
+ result = 0;
+ } else {
+ /*
+ * When using the locked AIO function (generic_file_aio_write()),
+ * testing has shown the inode mutex to be a limiting factor
+ * with multi-threaded single-shared-file performance. To get
+ * around this, we now use the lockless version. To maintain
+ * consistency, proper locking to protect against writes,
+ * truncates, etc. is handled in the higher layers of Lustre.
+ */
+ result = __generic_file_aio_write(cio->cui_iocb,
+ cio->cui_iov, cio->cui_nrsegs,
+ &cio->cui_iocb->ki_pos);
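+
+ /* The lockless variant skips the generic_write_sync()
+ * call that generic_file_aio_write() makes for O_SYNC
+ * writes, so do it here. */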
+ if (result > 0 || result == -EIOCBQUEUED) {
+ ssize_t err;
+
+ err = generic_write_sync(cio->cui_iocb->ki_filp,
+ pos, result);
+ if (err < 0 && result > 0)
+ result = err;
+ }
+
+ }
if (result > 0) {
result = vvp_io_write_commit(env, io);
if (cio->u.write.cui_written > 0) {
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
- struct vm_fault *vmf = cfio->fault.ft_vmf;
+ struct vm_fault *vmf = cfio->ft_vmf;
- cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
- cfio->fault.ft_flags_valid = 1;
+ cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf);
+ cfio->ft_flags_valid = 1;
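+
+ /* Translate the VM_FAULT_* result into an errno for the cl_io layer. */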
- if (vmf->page) {
- LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
- vmf->virtual_address);
- if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
- lock_page(vmf->page);
- cfio->fault.ft_flags |= VM_FAULT_LOCKED;
- }
+ if (vmf->page) {
+ LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
+ vmf->virtual_address);
+ if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
+ lock_page(vmf->page);
+ cfio->ft_flags |= VM_FAULT_LOCKED;
+ }
- cfio->ft_vmpage = vmf->page;
- return 0;
- }
+ cfio->ft_vmpage = vmf->page;
+ return 0;
+ }
- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
- CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
- return -EFAULT;
- }
+ if (cfio->ft_flags & VM_FAULT_SIGBUS) {
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+ return -EFAULT;
+ }
+
- if (cfio->fault.ft_flags & VM_FAULT_OOM) {
- CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
- return -ENOMEM;
- }
+ if (cfio->ft_flags & VM_FAULT_OOM) {
+ CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
+ return -ENOMEM;
+ }
+
- if (cfio->fault.ft_flags & VM_FAULT_RETRY)
- return -EAGAIN;
- CERROR("unknow error in page fault %d!\n", cfio->fault.ft_flags);
- return -EINVAL;
+ if (cfio->ft_flags & VM_FAULT_RETRY)
+ return -EAGAIN;
+ CERROR("unknown error in page fault %d\n", cfio->ft_flags);
+ return -EINVAL;
}
static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
struct cl_page *page)
{
- struct ccc_page *cp;
+ struct vvp_page *vpg;
struct cl_object *clob = cl_io_top(io)->ci_obj;
set_page_dirty(page->cp_vmpage);
- cp = cl2ccc_page(cl_object_page_slice(clob, page));
- vvp_write_pending(cl2ccc(clob), cp);
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ vvp_write_pending(cl2vvp(clob), vpg);
}
static int vvp_io_fault_start(const struct lu_env *env,
struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
struct cl_fault_io *fio = &io->u.ci_fault;
struct vvp_fault_io *cfio = &vio->u.fault;
loff_t offset;
wait_on_page_writeback(vmpage);
if (!PageDirty(vmpage)) {
struct cl_page_list *plist = &io->ci_queue.c2_qin;
- struct ccc_page *cp = cl_object_page_slice(obj, page);
+ struct vvp_page *vpg = cl2vvp_page(cl_object_page_slice(obj, page));
int to = PAGE_SIZE;
/* vvp_page_assume() calls wait_on_page_writeback(). */
cl_page_list_add(plist, page);
/* size fixup */
- if (last_index == ccc_index(cp))
+ if (last_index == vvp_index(vpg))
to = size & ~CFS_PAGE_MASK;
/* Do not set Dirty bit here so that in case IO is
/* return unlocked vmpage to avoid deadlocking */
if (vmpage != NULL)
unlock_page(vmpage);
- cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+
+ cfio->ft_flags &= ~VM_FAULT_LOCKED;
+
return result;
}
const struct cl_page_slice *slice)
{
struct cl_io *io = ios->cis_io;
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *page = slice->cpl_page;
- struct inode *inode = ccc_object_inode(slice->cpl_obj);
+ struct inode *inode = vvp_object_inode(slice->cpl_obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
+ struct ll_file_data *fd = cl2vvp_io(env, ios)->cui_fd;
struct ll_readahead_state *ras = &fd->fd_ras;
struct cl_2queue *queue = &io->ci_queue;
if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
sbi->ll_ra_info.ra_max_pages > 0)
- ras_update(sbi, inode, ras, ccc_index(cp),
- cp->cpg_defer_uptodate);
+ ras_update(sbi, inode, ras, vvp_index(vpg),
+ vpg->vpg_defer_uptodate);
- if (cp->cpg_defer_uptodate) {
- cp->cpg_ra_used = 1;
- cl_page_export(env, page, 1);
- }
+ if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
+ cl_page_export(env, page, 1);
+ }
- /*
- * Add page into the queue even when it is marked uptodate above.
- * this will unlock it automatically as part of cl_page_list_disown().
- */
- cl_2queue_add(queue, page);
+ /*
+ * Add the page into the queue even when it is marked uptodate above.
+ * This will unlock it automatically as part of cl_page_list_disown().
+ */
+ cl_2queue_add(queue, page);
if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
sbi->ll_ra_info.ra_max_pages > 0)
ll_readahead(env, io, &queue->c2_qin, ras,
- cp->cpg_defer_uptodate);
+ vpg->vpg_defer_uptodate);
RETURN(0);
}
static const struct cl_io_operations vvp_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_fini = vvp_io_read_fini,
- .cio_lock = vvp_io_read_lock,
- .cio_start = vvp_io_read_start,
- .cio_advance = ccc_io_advance
- },
+ .op = {
+ [CIT_READ] = {
+ .cio_fini = vvp_io_fini,
+ .cio_lock = vvp_io_read_lock,
+ .cio_start = vvp_io_read_start,
+ .cio_advance = vvp_io_advance,
+ },
[CIT_WRITE] = {
.cio_fini = vvp_io_fini,
.cio_iter_init = vvp_io_write_iter_init,
.cio_iter_fini = vvp_io_write_iter_fini,
.cio_lock = vvp_io_write_lock,
.cio_start = vvp_io_write_start,
- .cio_advance = ccc_io_advance
+ .cio_advance = vvp_io_advance,
},
[CIT_SETATTR] = {
.cio_fini = vvp_io_setattr_fini,
.cio_iter_init = vvp_io_fault_iter_init,
.cio_lock = vvp_io_fault_lock,
.cio_start = vvp_io_fault_start,
- .cio_end = ccc_io_end
+ .cio_end = vvp_io_end,
},
[CIT_FSYNC] = {
.cio_start = vvp_io_fsync_start,
struct cl_io *io)
{
struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(obj);
- int result;
+ struct inode *inode = vvp_object_inode(obj);
+ int result;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- ENTRY;
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+ ENTRY;
CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
- "restore needed %d\n",
+ "restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->cui_layout_gen, io->ci_restore_needed);
- CL_IO_SLICE_CLEAN(cio, cui_cl);
- cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
- vio->cui_ra_window_set = 0;
+ CL_IO_SLICE_CLEAN(vio, cui_cl);
+ cl_io_slice_add(io, &vio->cui_cl, obj, &vvp_io_ops);
+ vio->cui_ra_valid = false;
result = 0;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
size_t count;
* results." -- Single Unix Spec */
if (count == 0)
result = 1;
- else {
- cio->cui_tot_count = count;
- cio->cui_tot_nrsegs = 0;
- }
+ else {
+ vio->cui_tot_count = count;
+ vio->cui_tot_nrsegs = 0;
+ }
/* for read/write, we store the jobid in the inode, and
* it'll be fetched by osc when building RPC.
* even for operations requiring to open file, such as read and write,
* because it might not grant layout lock in IT_OPEN. */
if (result == 0 && !io->ci_ignore_layout) {
- result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ result = ll_layout_refresh(inode, &vio->cui_layout_gen);
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout
RETURN(result);
}
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- /* Caling just for assertion */
- cl2ccc_io(env, slice);
- return vvp_env_io(env);
-}