* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
*/
/*
* This file is part of Lustre, http://www.lustre.org/
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
const struct cl_io_slice *slice);
+/**
+ * Returns true if \a io is a normal io, false for sendfile() / splice_{read|write}.
+ */
+int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ return vio->cui_io_subtype == IO_NORMAL;
+}
+
/*****************************************************************************
*
* io operations.
struct cl_object *obj = io->ci_obj;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- if (io->ci_type == CIT_WRITE)
- up(&ll_i2info(ccc_object_inode(obj))->lli_write_sem);
- else {
+ if (io->ci_type == CIT_READ) {
struct vvp_io *vio = cl2vvp_io(env, ios);
struct ccc_io *cio = cl2ccc_io(env, ios);
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- if (cl_io_is_sendfile(io))
+ if (!cl_is_normalio(env, io))
RETURN(0);
for (seg = 0; seg < vio->cui_nrsegs; seg++) {
policy.l_extent.start);
descr->cld_end = cl_index(descr->cld_obj,
policy.l_extent.end);
- result = cl_io_lock_alloc_add(env, io, descr, flags);
+ descr->cld_enq_flags = flags;
+ result = cl_io_lock_alloc_add(env, io, descr);
+
+ CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+ descr->cld_mode, descr->cld_start,
+ descr->cld_end);
+
if (result < 0)
RETURN(result);
int ast_flags = 0;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- LASSERT(vvp_env_io(env)->cui_oneshot == 0);
ENTRY;
ccc_io_update_iov(env, cio, io);
struct cl_io *io = ios->cis_io;
loff_t start;
loff_t end;
- int result;
- if (cl2vvp_io(env, ios)->cui_oneshot == 0) {
- if (io->u.ci_wr.wr_append) {
- start = 0;
- end = OBD_OBJECT_EOF;
- } else {
- start = io->u.ci_wr.wr.crw_pos;
- end = start + io->u.ci_wr.wr.crw_count - 1;
- }
- result = vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
- } else
- result = 0;
- return result;
+ if (io->u.ci_wr.wr_append) {
+ start = 0;
+ end = OBD_OBJECT_EOF;
+ } else {
+ start = io->u.ci_wr.wr.crw_pos;
+ end = start + io->u.ci_wr.wr.crw_count - 1;
+ }
+ return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}
-static int vvp_io_trunc_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
+static int vvp_io_setattr_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
{
- struct ccc_io *vio = cl2ccc_io(env, ios);
+ struct ccc_io *cio = ccc_env_io(env);
struct inode *inode = ccc_object_inode(ios->cis_obj);
/*
* This last one is especially bad for racing o_append users on other
* nodes.
*/
-
UNLOCK_INODE_MUTEX(inode);
- UP_WRITE_I_ALLOC_SEM(inode);
- vio->u.trunc.cui_locks_released = 1;
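+ /* i_alloc_sem is held only for a truncating setattr, so drop it
+ * only in that case. */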
+ if (cl_io_is_trunc(ios->cis_io))
+ UP_WRITE_I_ALLOC_SEM(inode);
+ cio->u.setattr.cui_locks_released = 1;
return 0;
}
/**
- * Implementation of cl_io_operations::cio_lock() method for CIT_TRUNC io.
+ * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
*
* Handles "lockless io" mode when extent locking is done by server.
*/
-static int vvp_io_trunc_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
+static int vvp_io_setattr_lock(const struct lu_env *env,
+ const struct cl_io_slice *ios)
{
- struct ccc_io *vio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- size_t new_size = io->u.ci_truncate.tr_size;
- __u32 enqflags = new_size == 0 ? CEF_DISCARD_DATA : 0;
- int result;
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_io *io = ios->cis_io;
+ size_t new_size;
+ __u32 enqflags = 0;
+
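+ /* Truncate locks [new_size, EOF]; a timestamp-only setattr takes
+ * an extent lock only when both the new atime and mtime fall
+ * behind the new ctime. */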
+ if (cl_io_is_trunc(io)) {
+ new_size = io->u.ci_setattr.sa_attr.lvb_size;
+ if (new_size == 0)
+ enqflags = CEF_DISCARD_DATA;
+ } else {
+ if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
+ io->u.ci_setattr.sa_attr.lvb_ctime) ||
+ (io->u.ci_setattr.sa_attr.lvb_atime >=
+ io->u.ci_setattr.sa_attr.lvb_ctime))
+ return 0;
+ new_size = 0;
+ }
+ cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
+ return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
+ new_size, OBD_OBJECT_EOF);
+}
+
+static int vvp_do_vmtruncate(struct inode *inode, size_t size)
+{
+ int result;
+ /*
+ * Only ll_inode_size_lock is taken at this level. lov_stripe_lock()
+ * is grabbed by ll_truncate() only around the call to obd_adjust_kms().
+ * If vmtruncate() returns 0, then ll_truncate() has dropped
+ * ll_inode_size_lock().
+ */
+ ll_inode_size_lock(inode, 0);
+ result = vmtruncate(inode, size);
+ if (result != 0)
+ ll_inode_size_unlock(inode, 0);
- vio->u.trunc.cui_local_lock = TRUNC_EXTENT;
- result = ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
- new_size, OBD_OBJECT_EOF);
return result;
}
-static int vvp_io_trunc_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
+static int vvp_io_setattr_trunc(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ struct inode *inode, loff_t size)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
struct cl_object *obj = ios->cis_obj;
- size_t size = io->u.ci_truncate.tr_size;
pgoff_t start = cl_index(obj, size);
int result;
- LASSERT(cio->u.trunc.cui_locks_released);
- LASSERT(vio->cui_oneshot == 0);
-
- LOCK_INODE_MUTEX(inode);
DOWN_WRITE_I_ALLOC_SEM(inode);
- cio->u.trunc.cui_locks_released = 0;
- /*
- * Only ll_inode_size_lock is taken at this level. lov_stripe_lock()
- * is grabbed by ll_truncate() only over call to obd_adjust_kms(). If
- * vmtruncate returns 0, then ll_truncate dropped ll_inode_size_lock()
- */
- ll_inode_size_lock(inode, 0);
- result = vmtruncate(inode, size);
- if (result != 0)
- ll_inode_size_unlock(inode, 0);
+ result = vvp_do_vmtruncate(inode, size);
+
/*
* If a page is partially truncated, keep it owned across truncate to
* prevent... races.
struct cl_object_header *hdr;
hdr = cl_object_header(obj);
- spin_lock(&hdr->coh_page_guard);
+ cfs_spin_lock(&hdr->coh_page_guard);
vio->cui_partpage = cl_page_lookup(hdr, start);
- spin_unlock(&hdr->coh_page_guard);
+ cfs_spin_unlock(&hdr->coh_page_guard);
if (vio->cui_partpage != NULL)
/*
return result;
}
-static void vvp_io_trunc_end(const struct lu_env *env,
- const struct cl_io_slice *ios)
+static int vvp_io_setattr_time(const struct lu_env *env,
+ const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ int result;
+ unsigned valid = CAT_CTIME;
+
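+ /* Push ctime, plus atime/mtime when explicitly set, down to the
+ * cl_object attributes. */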
+ cl_object_attr_lock(obj);
+ attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
+ if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
+ attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
+ valid |= CAT_ATIME;
+ }
+ if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
+ attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
+ valid |= CAT_MTIME;
+ }
+ result = cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
+
+ return result;
+}
+
+static int vvp_io_setattr_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(io->ci_obj);
+ LASSERT(cio->u.setattr.cui_locks_released);
+
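+ /* Re-take the inode mutex released by vvp_io_setattr_iter_init(). */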
+ LOCK_INODE_MUTEX(inode);
+ cio->u.setattr.cui_locks_released = 0;
+
+ if (cl_io_is_trunc(io))
+ return vvp_io_setattr_trunc(env, ios, inode,
+ io->u.ci_setattr.sa_attr.lvb_size);
+ else
+ return vvp_io_setattr_time(env, ios);
+}
+
+static void vvp_io_setattr_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(io->ci_obj);
+
+ if (!cl_io_is_trunc(io))
+ return;
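+
+ /* Release the partially truncated page kept owned across the
+ * truncate (see vvp_io_setattr_trunc()). */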
if (vio->cui_partpage != NULL) {
cl_page_disown(env, ios->cis_io, vio->cui_partpage);
cl_page_put(env, vio->cui_partpage);
vio->cui_partpage = NULL;
}
+
+ /*
+ * Do vmtruncate again to remove possible stale pages populated by
+ * competing read threads (bz20645).
+ */
+ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
}
-static void vvp_io_trunc_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
+static void vvp_io_setattr_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
{
struct ccc_io *cio = ccc_env_io(env);
+ struct cl_io *io = ios->cis_io;
struct inode *inode = ccc_object_inode(ios->cis_io->ci_obj);
- if (cio->u.trunc.cui_locks_released) {
+ if (cio->u.setattr.cui_locks_released) {
LOCK_INODE_MUTEX(inode);
- DOWN_WRITE_I_ALLOC_SEM(inode);
- cio->u.trunc.cui_locks_released = 0;
+ if (cl_io_is_trunc(io))
+ DOWN_WRITE_I_ALLOC_SEM(inode);
+ cio->u.setattr.cui_locks_released = 0;
}
vvp_io_fini(env, ios);
}
int exceed = 0;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- LASSERT(vio->cui_oneshot == 0);
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
/* BUG: 5972 */
file_accessed(file);
- if (cl_io_is_sendfile(io)) {
+ switch (vio->cui_io_subtype) {
+ case IO_NORMAL:
+ result = lustre_generic_file_read(file, cio, &pos);
+ break;
+#ifdef HAVE_KERNEL_SENDFILE
+ case IO_SENDFILE:
result = generic_file_sendfile(file, &pos, cnt,
- vio->u.read.cui_actor, vio->u.read.cui_target);
- } else {
- result = lustre_generic_file_read(file, cio, &pos);
+ vio->u.sendfile.cui_actor,
+ vio->u.sendfile.cui_target);
+ break;
+#endif
+#ifdef HAVE_KERNEL_SPLICE_READ
+ case IO_SPLICE:
+ result = generic_file_splice_read(file, &pos,
+ vio->u.splice.cui_pipe, cnt,
+ vio->u.splice.cui_flags);
+ break;
+#endif
+ default:
+ CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
+ LBUG();
}
out:
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (cl2vvp_io(env, ios)->cui_oneshot > 0)
+ if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
result = 0;
else
result = lustre_generic_file_write(file, cio, &pos);
RETURN(result);
}
+#ifndef HAVE_VM_OP_FAULT
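+/*
+ * Kernels without vm_operations_struct::fault use the older ->nopage
+ * protocol; translate its NOPAGE_* sentinels into errno values.
+ */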
+static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
+{
+ cfs_page_t *vmpage;
+
+ vmpage = filemap_nopage(cfio->ft_vma, cfio->nopage.ft_address,
+ cfio->nopage.ft_type);
+
+ if (vmpage == NOPAGE_SIGBUS) {
+ CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n",
+ cfio->nopage.ft_address, (long)cfio->nopage.ft_type);
+ return -EFAULT;
+ } else if (vmpage == NOPAGE_OOM) {
+ CDEBUG(D_PAGE, "got addr %lu type %lx - OOM\n",
+ cfio->nopage.ft_address, (long)cfio->nopage.ft_type);
+ return -ENOMEM;
+ }
+
+ LL_CDEBUG_PAGE(D_PAGE, vmpage, "got addr %lu type %lx\n",
+ cfio->nopage.ft_address, (long)cfio->nopage.ft_type);
+
+ cfio->ft_vmpage = vmpage;
+
+ return 0;
+}
+#else
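+/*
+ * Kernels with vm_operations_struct::fault: call filemap_fault() and map
+ * the resulting VM_FAULT_* flags onto errno values.
+ */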
+static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
+{
+ cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, cfio->fault.ft_vmf);
+
+ if (cfio->fault.ft_vmf->page) {
+ LL_CDEBUG_PAGE(D_PAGE, cfio->fault.ft_vmf->page,
+ "got addr %p type NOPAGE\n",
+ cfio->fault.ft_vmf->virtual_address);
+ /* XXX workaround for a bug in CLIO: it deadlocks with lock
+ * cancellation if the page is left locked. */
+ if (likely(cfio->fault.ft_flags & VM_FAULT_LOCKED)) {
+ unlock_page(cfio->fault.ft_vmf->page);
+ cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+ }
+
+ cfio->ft_vmpage = cfio->fault.ft_vmf->page;
+ return 0;
+ }
+
+ if (unlikely(cfio->fault.ft_flags & VM_FAULT_ERROR)) {
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n",
+ cfio->fault.ft_vmf->virtual_address);
+ return -EFAULT;
+ }
+
+ if (unlikely(cfio->fault.ft_flags & VM_FAULT_NOPAGE)) {
+ CDEBUG(D_PAGE, "got addr %p - OOM\n",
+ cfio->fault.ft_vmf->virtual_address);
+ return -ENOMEM;
+ }
+
+ if (unlikely(cfio->fault.ft_flags & VM_FAULT_RETRY))
+ return -EAGAIN;
+
+ CERROR("unknow error in page fault!\n");
+ return -EINVAL;
+}
+
+#endif
+
static int vvp_io_fault_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct inode *inode = ccc_object_inode(obj);
struct cl_fault_io *fio = &io->u.ci_fault;
struct vvp_fault_io *cfio = &vio->u.fault;
- cfs_page_t *vmpage;
loff_t offset;
+ int kernel_result = 0;
int result = 0;
-
- LASSERT(vio->cui_oneshot == 0);
+ struct cl_page *page;
+ loff_t size;
+ pgoff_t last; /* last page in a file data region */
if (fio->ft_executable &&
LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
if (result != 0)
return result;
- vmpage = filemap_nopage(cfio->ft_vma, cfio->ft_address, cfio->ft_type);
- if (vmpage != NOPAGE_SIGBUS && vmpage != NOPAGE_OOM)
- LL_CDEBUG_PAGE(D_PAGE, vmpage,
- "got addr %lu type %lx\n",
- cfio->ft_address, (long)cfio->ft_type);
- else
- CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n",
- cfio->ft_address, (long)cfio->ft_type);
-
- if (vmpage == NOPAGE_SIGBUS)
- result = -EFAULT;
- else if (vmpage == NOPAGE_OOM)
- result = -ENOMEM;
- else {
- struct cl_page *page;
- loff_t size;
- pgoff_t last; /* last page in a file data region */
-
- /* Temporarily lock vmpage to keep cl_page_find() happy. */
- lock_page(vmpage);
- page = cl_page_find(env, obj, fio->ft_index, vmpage,
- CPT_CACHEABLE);
- unlock_page(vmpage);
- if (!IS_ERR(page)) {
- size = i_size_read(inode);
- last = cl_index(obj, size - 1);
- if (fio->ft_index == last)
- /*
- * Last page is mapped partially.
- */
- fio->ft_nob = size - cl_offset(obj,
- fio->ft_index);
- else
- fio->ft_nob = cl_page_size(obj);
- lu_ref_add(&page->cp_reference, "fault", io);
- fio->ft_page = page;
- /*
- * Certain 2.6 kernels return not-NULL from
- * filemap_nopage() when page is beyond the file size,
- * on the grounds that "An external ptracer can access
- * pages that normally aren't accessible.." Don't
- * propagate such page fault to the lower layers to
- * avoid side-effects like KMS updates.
- */
- if (fio->ft_index > last)
- result = +1;
- } else
- result = PTR_ERR(page);
+ /* must return unlocked page */
+ kernel_result = vvp_io_kernel_fault(cfio);
+ if (kernel_result != 0)
+ return kernel_result;
+
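+ /* Fault-injection point: simulate a truncate racing with this
+ * page fault by dropping the pages just faulted in. */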
+ if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE)) {
+ truncate_inode_pages_range(inode->i_mapping,
+ cl_offset(obj, fio->ft_index), offset);
+ }
+
+ /* Temporarily lock vmpage to keep cl_page_find() happy. */
+ lock_page(cfio->ft_vmpage);
+
+ /* Even though we hold a cl_lock on this page, it can still be
+ * truncated locally. */
+ if (unlikely(cfio->ft_vmpage->mapping == NULL)) {
+ unlock_page(cfio->ft_vmpage);
+
+ CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
+
+ /* Return +1 to stop cl_io_loop(); ll_fault() will catch the
+ * error and retry. */
+ return +1;
}
+
+ page = cl_page_find(env, obj, fio->ft_index, cfio->ft_vmpage,
+ CPT_CACHEABLE);
+ unlock_page(cfio->ft_vmpage);
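+ /* On error, drop the page reference taken by the kernel fault
+ * handler. */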
+ if (IS_ERR(page)) {
+ page_cache_release(cfio->ft_vmpage);
+ cfio->ft_vmpage = NULL;
+ return PTR_ERR(page);
+ }
+
+ size = i_size_read(inode);
+ last = cl_index(obj, size - 1);
+ if (fio->ft_index == last)
+ /*
+ * Last page is mapped partially.
+ */
+ fio->ft_nob = size - cl_offset(obj, fio->ft_index);
+ else
+ fio->ft_nob = cl_page_size(obj);
+
+ lu_ref_add(&page->cp_reference, "fault", io);
+ fio->ft_page = page;
+ /*
+ * Certain 2.6 kernels return not-NULL from
+ * filemap_nopage() when page is beyond the file size,
+ * on the grounds that "An external ptracer can access
+ * pages that normally aren't accessible.." Don't
+ * propagate such page fault to the lower layers to
+ * avoid side-effects like KMS updates.
+ */
+ if (fio->ft_index > last)
+ result = +1;
+
return result;
}
int rc;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- LASSERT(cl2vvp_io(env, ios)->cui_oneshot == 0);
LASSERT(slice->cpl_obj == obj);
ENTRY;
- if (sbi->ll_ra_info.ra_max_pages_per_file)
+ if (sbi->ll_ra_info.ra_max_pages_per_file &&
+ sbi->ll_ra_info.ra_max_pages)
ras_update(sbi, inode, ras, page->cp_index,
cp->cpg_defer_uptodate);
/* Sanity check whether the page is protected by a lock. */
rc = cl_page_is_under_lock(env, io, page);
if (rc != -EBUSY) {
- CL_PAGE_HEADER(D_WARNING, env, page, "%s: %i\n",
+ CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
rc == -ENODATA ? "without a lock" :
"match failed", rc);
if (rc != -ENODATA)
* this will unlock it automatically as part of cl_page_list_disown().
*/
cl_2queue_add(queue, page);
- if (sbi->ll_ra_info.ra_max_pages_per_file)
+ if (sbi->ll_ra_info.ra_max_pages_per_file &&
+ sbi->ll_ra_info.ra_max_pages)
ll_readahead(env, io, ras,
vmpage->mapping, &queue->c2_qin, fd->fd_flags);
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
struct cl_page *page, struct ccc_page *cp,
- int to, enum cl_req_type crt)
+ enum cl_req_type crt)
{
struct cl_2queue *queue;
int result;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
queue = &io->ci_queue;
-
cl_2queue_init_page(queue, page);
- cl_page_clip(env, page, 0, to);
-
+
result = cl_io_submit_sync(env, io, crt, queue, CRP_NORMAL, 0);
LASSERT(cl_page_is_owned(page, io));
- cl_page_clip(env, page, 0, CFS_PAGE_SIZE);
if (crt == CRT_READ)
/*
} else if (cp->cpg_defer_uptodate)
cp->cpg_ra_used = 1;
else
- result = vvp_page_sync_io(env, io, pg, cp,
- CFS_PAGE_SIZE, CRT_READ);
+ result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
/*
* In older implementations, obdo_refresh_inode is called here
* to update the inode because the write might modify the
* it will not soon. */
vvp_write_pending(cl2ccc(obj), cp);
result = cl_page_cache_add(env, io, pg, CRT_WRITE);
- if (result == -EDQUOT)
+ if (result == -EDQUOT) {
+ pgoff_t last_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
+ bool need_clip = true;
+
/*
* Client ran out of disk space grant. Possible
* strategies are:
* what the new code continues to do for the time
* being.
*/
- result = vvp_page_sync_io(env, io, pg, cp,
- to, CRT_WRITE);
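+ /* Write out as much of the page as lies within i_size: the whole
+ * page when it sits strictly below the EOF page, or up to the
+ * in-page EOF offset on the last page. */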
+ if (last_index > pg->cp_index) {
+ to = CFS_PAGE_SIZE;
+ need_clip = false;
+ } else if (last_index == pg->cp_index) {
+ int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
+ if (to < size_to)
+ to = size_to;
+ }
+ if (need_clip)
+ cl_page_clip(env, pg, 0, to);
+ result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
if (result)
CERROR("Write page %lu of inode %p failed %d\n",
pg->cp_index, inode, result);
+ }
} else {
tallyop = LPROC_LL_DIRTY_HITS;
result = 0;
size = cl_offset(obj, pg->cp_index) + to;
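+ /* Serialize i_size updates and checks with the inode size lock. */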
+ ll_inode_size_lock(inode, 0);
if (result == 0) {
- if (size > i_size_read(inode))
- i_size_write(inode, size);
+ if (size > i_size_read(inode)) {
+ cl_isize_write_nolock(inode, size);
+ CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (unsigned long)size);
+ }
cl_page_export(env, pg, 1);
- } else if (size > i_size_read(inode))
- cl_page_discard(env, io, pg);
+ } else {
+ if (size > i_size_read(inode))
+ cl_page_discard(env, io, pg);
+ }
+ ll_inode_size_unlock(inode, 0);
RETURN(result);
}
.cio_start = vvp_io_write_start,
.cio_advance = ccc_io_advance
},
- [CIT_TRUNC] = {
- .cio_fini = vvp_io_trunc_fini,
- .cio_iter_init = vvp_io_trunc_iter_init,
- .cio_lock = vvp_io_trunc_lock,
- .cio_start = vvp_io_trunc_start,
- .cio_end = vvp_io_trunc_end
+ [CIT_SETATTR] = {
+ .cio_fini = vvp_io_setattr_fini,
+ .cio_iter_init = vvp_io_setattr_iter_init,
+ .cio_lock = vvp_io_setattr_lock,
+ .cio_start = vvp_io_setattr_start,
+ .cio_end = vvp_io_setattr_end
},
[CIT_FAULT] = {
.cio_fini = vvp_io_fault_fini,
CL_IO_SLICE_CLEAN(cio, cui_cl);
cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
- vio->cui_oneshot = 0;
vio->cui_ra_window_set = 0;
result = 0;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
- int op;
size_t count;
count = io->u.ci_rw.crw_count;
- op = io->ci_type == CIT_READ ?
- LPROC_LL_READ_BYTES : LPROC_LL_WRITE_BYTES;
- if (io->ci_type == CIT_WRITE)
- down(&ll_i2info(inode)->lli_write_sem);
/* "If nbyte is 0, read() will return 0 and have no other
* results." -- Single Unix Spec */
if (count == 0)
else {
cio->cui_tot_count = count;
cio->cui_tot_nrsegs = 0;
- ll_stats_ops_tally(sbi, op, count);
}
- } else if (io->ci_type == CIT_TRUNC) {
- /* lockless truncate? */
- ll_stats_ops_tally(sbi, LPROC_LL_TRUNC, 1);
+ } else if (io->ci_type == CIT_SETATTR) {
+ if (cl_io_is_trunc(io))
+ /* lockless truncate? */
+ ll_stats_ops_tally(sbi, LPROC_LL_TRUNC, 1);
+ else
+ io->ci_lockreq = CILR_MANDATORY;
}
RETURN(result);
}