#define DEBUG_SUBSYSTEM S_LLITE
-
#include <obd.h>
+#include <linux/pagevec.h>
+#include <linux/memcontrol.h>
+
#include "llite_internal.h"
#include "vvp_internal.h"
+#include <libcfs/linux/linux-misc.h>
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
const struct cl_io_slice *slice)
}
/**
- * True, if \a io is a normal io, False for splice_{read,write}
- */
-static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
-{
- struct vvp_io *vio = vvp_env_io(env);
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- return vio->vui_io_subtype == IO_NORMAL;
-}
-
-/**
* For swapping layout. The file's layout may have changed.
* To avoid populating pages to a wrong stripe, we have to verify the
* correctness of layout. It works because swapping layout processes
{
struct vvp_io *vio = cl2vvp_io(env, ios);
- cl_page_list_init(&vio->u.write.vui_queue);
- vio->u.write.vui_written = 0;
- vio->u.write.vui_from = 0;
- vio->u.write.vui_to = PAGE_SIZE;
+ cl_page_list_init(&vio->u.readwrite.vui_queue);
+ vio->u.readwrite.vui_written = 0;
+ vio->u.readwrite.vui_from = 0;
+ vio->u.readwrite.vui_to = PAGE_SIZE;
+
+ return 0;
+}
+
+static int vvp_io_read_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+
+ vio->u.readwrite.vui_read = 0;
return 0;
}
{
struct vvp_io *vio = cl2vvp_io(env, ios);
- LASSERT(vio->u.write.vui_queue.pl_nr == 0);
+ LASSERT(vio->u.readwrite.vui_queue.pl_nr == 0);
}
static int vvp_io_fault_iter_init(const struct lu_env *env,
struct inode *inode = vvp_object_inode(ios->cis_obj);
LASSERT(inode == file_inode(vio->vui_fd->fd_file));
- vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
return 0;
}
union ldlm_policy_data policy;
struct iovec iov;
struct iov_iter i;
+ unsigned long addr;
+ ssize_t count;
int result = 0;
ENTRY;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- if (!cl_is_normalio(env, io))
- RETURN(0);
-
/* nfs or loop back device write */
if (vio->vui_iter == NULL)
RETURN(0);
if (mm == NULL)
RETURN(0);
- iov_for_each(iov, i, *(vio->vui_iter)) {
- unsigned long addr = (unsigned long)iov.iov_base;
- size_t count = iov.iov_len;
+ if (!iter_is_iovec(vio->vui_iter) && !iov_iter_is_kvec(vio->vui_iter))
+ RETURN(0);
+
+ for (i = *vio->vui_iter;
+ iov_iter_count(&i);
+ iov_iter_advance(&i, iov.iov_len)) {
+ iov = iov_iter_iovec(&i);
+ addr = (unsigned long)iov.iov_base;
+ count = iov.iov_len;
if (count == 0)
continue;
const struct cl_io_slice *ios,
size_t nob)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
struct cl_object *obj = ios->cis_io->ci_obj;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- if (!cl_is_normalio(env, io))
- return;
-
+ /*
+ * Since kernel 3.16 (commit 26978b8b4) the VFS restores the iov_iter
+ * to its original position even when the I/O succeeded, so instead of
+ * relying on the VFS we advance the iov_iter ourselves (sketched
+ * after this function).
+ */
+ iov_iter_advance(vio->vui_iter, nob);
vio->vui_tot_count -= nob;
iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
}
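A sketch of the contract this creates: the start handlers below operate on a
copy of the iterator, and only vvp_io_advance() moves the original. A hedged
illustration, assuming a single read round:

	struct iov_iter iter = *vio->vui_iter;	/* work on a copy ... */
	ssize_t nob = generic_file_read_iter(vio->vui_iocb, &iter);

	if (nob > 0) {
		/* ... then advance the original by what was consumed,
		 * shrinking and re-expanding the outstanding total */
		iov_iter_advance(vio->vui_iter, nob);
		vio->vui_tot_count -= nob;
		iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
	}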
{
size_t size = io->u.ci_rw.crw_count;
- if (!cl_is_normalio(env, io) || vio->vui_iter == NULL)
+ if (!vio->vui_iter)
return;
iov_iter_truncate(vio->vui_iter, size);
ast_flags |= CEF_NONBLOCK;
if (io->ci_lock_no_expand)
ast_flags |= CEF_LOCK_NO_EXPAND;
+ if (vio->vui_fd) {
+ /* A held group lock means lockless I/O is no longer allowed */
+ if (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ io->ci_ignore_lockless = 1;
+
+ if (ll_file_nolock(vio->vui_fd->fd_file) ||
+ (vio->vui_fd->fd_flags & LL_FILE_LOCKLESS_IO &&
+ !io->ci_ignore_lockless))
+ ast_flags |= CEF_NEVER;
+ }
result = vvp_mmap_locks(env, vio, io);
if (result == 0)
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- __u64 new_size;
+ __u64 lock_start = 0;
+ __u64 lock_end = OBD_OBJECT_EOF;
__u32 enqflags = 0;
if (cl_io_is_trunc(io)) {
- new_size = io->u.ci_setattr.sa_attr.lvb_size;
- if (new_size == 0)
+ struct inode *inode = vvp_object_inode(io->ci_obj);
+
+ /* set enqueue flags to CEF_MUST for encrypted files, to prevent
+ * lockless truncate
+ */
+ if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
+ enqflags = CEF_MUST;
+ else if (io->u.ci_setattr.sa_attr.lvb_size == 0)
enqflags = CEF_DISCARD_DATA;
+ } else if (cl_io_is_fallocate(io)) {
+ lock_start = io->u.ci_setattr.sa_falloc_offset;
+ lock_end = lock_start + io->u.ci_setattr.sa_attr.lvb_size;
} else {
unsigned int valid = io->u.ci_setattr.sa_avalid;
io->u.ci_setattr.sa_attr.lvb_atime >=
io->u.ci_setattr.sa_attr.lvb_ctime))
return 0;
-
- new_size = 0;
}
return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
- new_size, OBD_OBJECT_EOF);
+ lock_start, lock_end);
}
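For the new fallocate branch, the lock covers just the affected range.
Assuming llite forwards the syscall's offset/length pair into
sa_falloc_offset and lvb_size (which is what the arithmetic above implies),
fallocate(fd, 0, 4096, 8192) takes the extent lock over [4096, 12288),
while truncate still locks [0, OBD_OBJECT_EOF].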
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
struct ll_inode_info *lli = ll_i2info(inode);
if (cl_io_is_trunc(io)) {
- down_write(&lli->lli_trunc_sem);
+ trunc_sem_down_write(&lli->lli_trunc_sem);
+ mutex_lock(&lli->lli_setattr_mutex);
+ inode_dio_wait(inode);
+ } else if (cl_io_is_fallocate(io)) {
inode_lock(inode);
inode_dio_wait(inode);
} else {
- inode_lock(inode);
+ mutex_lock(&lli->lli_setattr_mutex);
}
if (io->u.ci_setattr.sa_avalid & TIMES_SET_FLAGS)
/* Truncate in-memory pages - they must be clean pages
* because osc has already been notified to destroy osc_extents. */
vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
- inode_dio_write_done(inode);
+ mutex_unlock(&lli->lli_setattr_mutex);
+ trunc_sem_up_write(&lli->lli_trunc_sem);
+ } else if (cl_io_is_fallocate(io)) {
inode_unlock(inode);
- up_write(&lli->lli_trunc_sem);
} else {
- inode_unlock(inode);
+ mutex_unlock(&lli->lli_setattr_mutex);
}
}
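As this hunk now reads, the setattr start handler takes locks in a fixed
order per case, with the mirror-image unlocks in the end handler (summary of
the code above):

	truncate:   trunc_sem_down_write(&lli->lli_trunc_sem);
	            mutex_lock(&lli->lli_setattr_mutex);
	            inode_dio_wait(inode);
	fallocate:  inode_lock(inode);
	            inode_dio_wait(inode);
	other attr: mutex_lock(&lli->lli_setattr_mutex);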
static int vvp_io_read_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = vvp_object_inode(obj);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct file *file = vio->vui_fd->fd_file;
- loff_t pos = io->u.ci_rd.rd.crw_pos;
- long cnt = io->u.ci_rd.rd.crw_count;
- long tot = vio->vui_tot_count;
- int exceed = 0;
- int result;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct file *file = vio->vui_fd->fd_file;
+ loff_t pos = io->u.ci_rd.rd.crw_pos;
+ size_t cnt = io->u.ci_rd.rd.crw_count;
+ size_t tot = vio->vui_tot_count;
+ int exceed = 0;
+ int result;
+ struct iov_iter iter;
+
ENTRY;
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
file_dentry(file)->d_name.name,
pos, pos + cnt);
- if (vio->vui_io_subtype == IO_NORMAL)
- down_read(&lli->lli_trunc_sem);
+ trunc_sem_down_read(&lli->lli_trunc_sem);
+
+ if (io->ci_async_readahead) {
+ file_accessed(file);
+ RETURN(0);
+ }
if (!can_populate_pages(env, io, inode))
RETURN(0);
GOTO(out, result);
LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
- "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
+ "Read ino %lu, %zu bytes, offset %lld, size %llu\n",
inode->i_ino, cnt, pos, i_size_read(inode));
/* turn off the kernel's read-ahead */
/* initialize read-ahead window once per syscall */
if (!vio->vui_ra_valid) {
vio->vui_ra_valid = true;
- vio->vui_ra_start = cl_index(obj, pos);
- vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
+ vio->vui_ra_start_idx = cl_index(obj, pos);
+ vio->vui_ra_pages = cl_index(obj, tot + PAGE_SIZE - 1);
+ /* If both start and end are unaligned, we read one more page
+ * than the index math suggests; see the worked case below. */
+ if ((pos & ~PAGE_MASK) != 0 && ((pos + tot) & ~PAGE_MASK) != 0)
+ vio->vui_ra_pages++;
+
+ CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
+ tot, vio->vui_ra_start_idx, vio->vui_ra_pages);
}
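+ /*
+ * Worked case for the adjustment above, assuming 4 KiB pages:
+ * pos = 6144 (mid page 1) and tot = 4096 end the read at 10240
+ * (mid page 2). cl_index(obj, tot + PAGE_SIZE - 1) =
+ * (4096 + 4095) >> 12 = 1 page, yet pages 1 and 2 are both touched,
+ * so the unaligned start and end bump vui_ra_pages from 1 to 2.
+ */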
/* BUG: 5972 */
file_accessed(file);
- switch (vio->vui_io_subtype) {
- case IO_NORMAL:
- LASSERT(vio->vui_iocb->ki_pos == pos);
- result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
- break;
- case IO_SPLICE:
- result = generic_file_splice_read(file, &pos,
- vio->u.splice.vui_pipe, cnt,
- vio->u.splice.vui_flags);
- /* LU-1109: do splice read stripe by stripe otherwise if it
- * may make nfsd stuck if this read occupied all internal pipe
- * buffers. */
- io->ci_continue = 0;
- break;
- default:
- CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
- LBUG();
- }
- GOTO(out, result);
-
+ LASSERT(vio->vui_iocb->ki_pos == pos);
+ iter = *vio->vui_iter;
+ result = generic_file_read_iter(vio->vui_iocb, &iter);
out:
if (result >= 0) {
if (result < cnt)
io->ci_continue = 0;
io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid, vio->vui_fd,
- pos, result, READ);
result = 0;
+ } else if (result == -EIOCBQUEUED) {
+ io->ci_nob += vio->u.readwrite.vui_read;
+ vio->vui_iocb->ki_pos = pos + vio->u.readwrite.vui_read;
}
return result;
RETURN(bytes > 0 ? bytes : rc);
}
+/*
+ * Kernels 4.2 - 4.5 pass a memcg argument to account_page_dirtied();
+ * kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied().
+ */
+static inline void ll_account_page_dirtied(struct page *page,
+ struct address_space *mapping)
+{
+#ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
+ struct mem_cgroup *memcg = mem_cgroup_begin_page_stat(page);
+
+ account_page_dirtied(page, mapping, memcg);
+ mem_cgroup_end_page_stat(memcg);
+#elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT)
+ account_page_dirtied(page, mapping);
+#else
+ vvp_account_page_dirtied(page, mapping);
+#endif
+}
+
+/*
+ * Since kernel v4.19-rc5-248-g9b89a0355144 the page cache uses XArray;
+ * prior kernels use radix_tree for tags.
+ */
+static inline void ll_page_tag_dirty(struct page *page,
+ struct address_space *mapping)
+{
+#ifndef HAVE_RADIX_TREE_TAG_SET
+ __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
+#else
+ radix_tree_tag_set(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
+#endif
+}
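The dirty tag set here is what writeback later walks to find these pages. A
minimal sketch of the consumer side on XArray kernels, assuming the generic
xa_for_each_marked() iterator:

	struct page *page;
	unsigned long index;

	/* visit every page this mapping has tagged PAGECACHE_TAG_DIRTY,
	 * including those tagged by ll_page_tag_dirty() above */
	xa_for_each_marked(&mapping->i_pages, index, page,
			   PAGECACHE_TAG_DIRTY) {
		/* ... queue the page for writeback ... */
	}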
+
+/* Taken from the kernel's set_page_dirty() and __set_page_dirty_nobuffers().
+ * Last change to this area: b93b016313b3ba8003c3b8bb71f569af91f19fc7
+ *
+ * Current with Linus' tip of tree (7/13/2019):
+ * v5.2-rc4-224-ge01e060fe0
+ *
+ * Backwards compat for 3.x - 5.x kernels covering memcg handling and
+ * the rename of the radix tree to xarray.
+ */
+void vvp_set_pagevec_dirty(struct pagevec *pvec)
+{
+ struct page *page = pvec->pages[0];
+ struct address_space *mapping = page->mapping;
+ unsigned long flags;
+ unsigned long skip_pages = 0;
+ int count = pagevec_count(pvec);
+ int dirtied = 0;
+ int i;
+
+ ENTRY;
+
+ BUILD_BUG_ON(PAGEVEC_SIZE > BITS_PER_LONG);
+ LASSERTF(page->mapping,
+ "mapping must be set. page %p, page->private (cl_page) %p\n",
+ page, (void *) page->private);
+
+ for (i = 0; i < count; i++) {
+ page = pvec->pages[i];
+
+ ClearPageReclaim(page);
+
+ lock_page_memcg(page);
+ if (TestSetPageDirty(page)) {
+ /* page is already dirty .. no extra work needed
+ * set a flag for the i'th page to be skipped
+ */
+ unlock_page_memcg(page);
+ skip_pages |= (1ul << i);
+ }
+ }
+
+ ll_xa_lock_irqsave(&mapping->i_pages, flags);
+
+ /* Notes on differences with __set_page_dirty_nobuffers:
+ * 1. We don't need to call page_mapping because we know this is a page
+ * cache page.
+ * 2. We have the pages locked, so there is no need for the careful
+ * mapping/mapping2 dance.
+ * 3. A NULL mapping is impossible. (The truncate race mentioned in
+ * __set_page_dirty_nobuffers cannot happen because we hold the page lock.)
+ * 4. All mappings are the same because i/o is only to one file.
+ */
+ for (i = 0; i < count; i++) {
+ page = pvec->pages[i];
+ /* if the i'th page was found already dirty (memcg unlocked above), skip it */
+ if ((skip_pages >> i) & 1)
+ continue;
+
+ LASSERTF(page->mapping == mapping,
+ "all pages must have the same mapping. page %p, mapping %p, first mapping %p\n",
+ page, page->mapping, mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+ ll_account_page_dirtied(page, mapping);
+ ll_page_tag_dirty(page, mapping);
+ dirtied++;
+ unlock_page_memcg(page);
+ }
+ ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
+
+ CDEBUG(D_VFSTRACE, "mapping %p, count %d, dirtied %d\n", mapping,
+ count, dirtied);
+
+ if (mapping->host && dirtied) {
+ /* !PageAnon && !swapper_space */
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ }
+
+ EXIT;
+}
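For reference, the reworked callbacks below hand this function a ready
pagevec. A minimal sketch of that calling pattern, assuming every page is
locked and belongs to the same mapping as the LASSERTs require
(pagevec_init() takes a second "cold" argument on kernels before 4.15):

	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++)
		if (pagevec_add(&pvec, pages[i]) == 0)	/* 0 => pvec full */
			break;
	/* one i_pages lock round trip for the whole batch, instead of
	 * one per page as with set_page_dirty() */
	vvp_set_pagevec_dirty(&pvec);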
+
static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+ struct pagevec *pvec)
{
- struct page *vmpage = page->cp_vmpage;
+ int count = 0;
+ int i = 0;
+
+ ENTRY;
+
+ count = pagevec_count(pvec);
+ LASSERT(count > 0);
+
+ for (i = 0; i < count; i++) {
+ struct page *vmpage = pvec->pages[i];
+ SetPageUptodate(vmpage);
+ }
- SetPageUptodate(vmpage);
- set_page_dirty(vmpage);
+ vvp_set_pagevec_dirty(pvec);
- cl_page_disown(env, io, page);
+ for (i = 0; i < count; i++) {
+ struct page *vmpage = pvec->pages[i];
+ struct cl_page *page = (struct cl_page *) vmpage->private;
+ cl_page_disown(env, io, page);
+ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+ cl_page_put(env, page);
+ }
- /* held in ll_cl_init() */
- lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
- cl_page_put(env, page);
+ EXIT;
}
/* make sure the page list is contiguous */
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
struct vvp_io *vio = vvp_env_io(env);
- struct cl_page_list *queue = &vio->u.write.vui_queue;
+ struct cl_page_list *queue = &vio->u.readwrite.vui_queue;
struct cl_page *page;
int rc = 0;
int bytes = 0;
- unsigned int npages = vio->u.write.vui_queue.pl_nr;
+ unsigned int npages = vio->u.readwrite.vui_queue.pl_nr;
ENTRY;
if (npages == 0)
RETURN(0);
CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
- npages, vio->u.write.vui_from, vio->u.write.vui_to);
+ npages, vio->u.readwrite.vui_from, vio->u.readwrite.vui_to);
LASSERT(page_list_sanity_check(obj, queue));
/* submit IO with async write */
rc = cl_io_commit_async(env, io, queue,
- vio->u.write.vui_from, vio->u.write.vui_to,
+ vio->u.readwrite.vui_from,
+ vio->u.readwrite.vui_to,
write_commit_callback);
npages -= queue->pl_nr; /* already committed pages */
if (npages > 0) {
bytes = npages << PAGE_SHIFT;
/* first page */
- bytes -= vio->u.write.vui_from;
+ bytes -= vio->u.readwrite.vui_from;
if (queue->pl_nr == 0) /* last page */
- bytes -= PAGE_SIZE - vio->u.write.vui_to;
+ bytes -= PAGE_SIZE - vio->u.readwrite.vui_to;
LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
- vio->u.write.vui_written += bytes;
+ vio->u.readwrite.vui_written += bytes;
CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
- npages, bytes, vio->u.write.vui_written);
+ npages, bytes, vio->u.readwrite.vui_written);
/* the first page must have been written. */
- vio->u.write.vui_from = 0;
+ vio->u.readwrite.vui_from = 0;
}
LASSERT(page_list_sanity_check(obj, queue));
LASSERT(ergo(rc == 0, queue->pl_nr == 0));
/* out of quota, try sync write */
if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
rc = vvp_io_commit_sync(env, io, queue,
- vio->u.write.vui_from,
- vio->u.write.vui_to);
+ vio->u.readwrite.vui_from,
+ vio->u.readwrite.vui_to);
if (rc > 0) {
- vio->u.write.vui_written += rc;
+ vio->u.readwrite.vui_written += rc;
rc = 0;
}
}
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
bool lock_inode = !IS_NOSEC(inode);
+ size_t nob = io->ci_nob;
+ struct iov_iter iter;
+ size_t written = 0;
ENTRY;
- if (vio->vui_io_subtype == IO_NORMAL)
- down_read(&lli->lli_trunc_sem);
+ trunc_sem_down_read(&lli->lli_trunc_sem);
if (!can_populate_pages(env, io, inode))
RETURN(0);
* truncates, etc. are handled in the higher layers of lustre.
*/
lock_inode = !IS_NOSEC(inode);
+ iter = *vio->vui_iter;
if (unlikely(lock_inode))
inode_lock(inode);
- result = __generic_file_write_iter(vio->vui_iocb,
- vio->vui_iter);
+ result = __generic_file_write_iter(vio->vui_iocb, &iter);
if (unlikely(lock_inode))
inode_unlock(inode);
- if (result > 0 || result == -EIOCBQUEUED)
+ written = result;
+ if (result > 0)
#ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
result = generic_write_sync(vio->vui_iocb, result);
#else
if (result > 0) {
result = vvp_io_write_commit(env, io);
- if (vio->u.write.vui_written > 0) {
- result = vio->u.write.vui_written;
+ /* Simulate short commit */
+ if (CFS_FAULT_CHECK(OBD_FAIL_LLITE_SHORT_COMMIT)) {
+ vio->u.readwrite.vui_written >>= 1;
+ if (vio->u.readwrite.vui_written > 0)
+ io->ci_need_restart = 1;
+ }
+ if (vio->u.readwrite.vui_written > 0) {
+ result = vio->u.readwrite.vui_written;
CDEBUG(D_VFSTRACE, "%s: write nob %zd, result: %zd\n",
file_dentry(file)->d_name.name,
io->ci_nob, result);
io->ci_nob += result;
+ } else {
+ io->ci_continue = 0;
}
}
- if (result > 0) {
+ if (vio->vui_iocb->ki_pos != (pos + io->ci_nob - nob)) {
+ CDEBUG(D_VFSTRACE,
+ "%s: write position mismatch: ki_pos %lld vs. pos %lld, written %zd, commit %zd: rc = %zd\n",
+ file_dentry(file)->d_name.name,
+ vio->vui_iocb->ki_pos, pos + io->ci_nob - nob,
+ written, io->ci_nob - nob, result);
+ /*
+ * Rewind ki_pos and vui_iter to the point where data
+ * was successfully committed.
+ */
+ vio->vui_iocb->ki_pos = pos + io->ci_nob - nob;
+ }
+ if (result > 0 || result == -EIOCBQUEUED) {
ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
- if (result < cnt)
+ if (result != -EIOCBQUEUED && result < cnt)
io->ci_continue = 0;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- vio->vui_fd, pos, result, WRITE);
- result = 0;
+ if (result > 0)
+ result = 0;
+ /* async write queued (-EIOCBQUEUED): advance by what was written */
+ if (result == -EIOCBQUEUED) {
+ io->ci_nob += vio->u.readwrite.vui_written;
+ vio->vui_iocb->ki_pos = pos +
+ vio->u.readwrite.vui_written;
+ }
}
RETURN(result);
static void vvp_io_rw_end(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
struct inode *inode = vvp_object_inode(ios->cis_obj);
struct ll_inode_info *lli = ll_i2info(inode);
- if (vio->vui_io_subtype == IO_NORMAL)
- up_read(&lli->lli_trunc_sem);
+ trunc_sem_up_read(&lli->lli_trunc_sem);
}
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
}
static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+ struct pagevec *pvec)
{
- set_page_dirty(page->cp_vmpage);
+ vvp_set_pagevec_dirty(pvec);
}
static int vvp_io_fault_start(const struct lu_env *env,
pgoff_t last_index;
ENTRY;
- down_read(&lli->lli_trunc_sem);
+ trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(PageLocked(vmpage));
if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
- ll_invalidate_page(vmpage);
+ generic_error_remove_page(vmpage->mapping, vmpage);
size = i_size_read(inode);
/* Though we have already held a cl_lock upon this page, but
if (fio->ft_mkwrite) {
wait_on_page_writeback(vmpage);
if (!PageDirty(vmpage)) {
- struct cl_page_list *plist = &io->ci_queue.c2_qin;
+ struct cl_page_list *plist = &vio->u.fault.ft_queue;
struct vvp_page *vpg = cl_object_page_slice(obj, page);
int to = PAGE_SIZE;
/* size fixup */
if (last_index == vvp_index(vpg))
- to = size & ~PAGE_MASK;
+ to = ((size - 1) & ~PAGE_MASK) + 1;
/* Do not set Dirty bit here so that in case IO is
* started before the page is really made dirty, we
* still have chance to detect it. */
result = cl_io_commit_async(env, io, plist, 0, to,
mkwrite_commit_callback);
+ /* The overquota flag is set; try a sync write to check
+ * whether we are truly out of quota */
+ if (result == -EDQUOT) {
+ cl_page_get(page);
+ result = vvp_io_commit_sync(env, io,
+ plist, 0, to);
+ if (result >= 0) {
+ io->ci_noquota = 1;
+ cl_page_own(env, io, page);
+ cl_page_list_add(plist, page);
+ lu_ref_add(&page->cp_reference,
+ "cl_io", io);
+ result = cl_io_commit_async(env, io,
+ plist, 0, to,
+ mkwrite_commit_callback);
+ io->ci_noquota = 0;
+ } else {
+ cl_page_put(env, page);
+ }
+ }
+
LASSERT(cl_page_is_owned(page, io));
cl_page_list_fini(env, plist);
if (result == -EDQUOT)
result = -ENOSPC;
GOTO(out, result);
- } else
+ } else {
cl_page_disown(env, io, page);
+ }
}
}
CLOBINVRNT(env, ios->cis_io->ci_obj,
vvp_object_invariant(ios->cis_io->ci_obj));
- up_read(&lli->lli_trunc_sem);
+ trunc_sem_up_read(&lli->lli_trunc_sem);
}
static int vvp_io_fsync_start(const struct lu_env *env,
struct vvp_io *vio = cl2vvp_io(env, ios);
if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- ra->cra_end = CL_PAGE_EOF;
+ ra->cra_end_idx = CL_PAGE_EOF;
result = +1; /* no need to call down */
}
}
RETURN(result);
}
+static int vvp_io_lseek_lock(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ __u64 lock_start = io->u.ci_lseek.ls_start;
+ __u64 lock_end = OBD_OBJECT_EOF;
+ __u32 enqflags = CEF_MUST; /* always take client lock */
+
+ return vvp_io_one_lock(env, io, enqflags, CLM_READ,
+ lock_start, lock_end);
+}
+
+static int vvp_io_lseek_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = vvp_object_inode(io->ci_obj);
+ __u64 start = io->u.ci_lseek.ls_start;
+
+ inode_lock(inode);
+ inode_dio_wait(inode);
+
+ /* We hold a DLM lock at this point, so just update the inode
+ * to learn the current file size.
+ */
+ ll_merge_attr(env, inode);
+ if (start >= i_size_read(inode)) {
+ io->u.ci_lseek.ls_result = -ENXIO;
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static void vvp_io_lseek_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = vvp_object_inode(io->ci_obj);
+
+ if (io->u.ci_lseek.ls_result > i_size_read(inode))
+ io->u.ci_lseek.ls_result = -ENXIO;
+
+ inode_unlock(inode);
+}
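The -ENXIO results in these CIT_LSEEK handlers are what user space sees from
lseek(2) with SEEK_DATA/SEEK_HOLE. A small standalone illustration of the
semantics implemented above (hypothetical test program):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int fd = open(argv[1], O_RDONLY);
		off_t eof, off;

		if (fd < 0)
			return 1;
		eof = lseek(fd, 0, SEEK_END);
		/* SEEK_DATA at or beyond EOF: vvp_io_lseek_start() above
		 * turns this into -ENXIO */
		off = lseek(fd, eof, SEEK_DATA);
		if (off < 0 && errno == ENXIO)
			printf("no data at or after %lld\n", (long long)eof);
		close(fd);
		return 0;
	}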
+
static const struct cl_io_operations vvp_io_ops = {
.op = {
[CIT_READ] = {
.cio_fini = vvp_io_fini,
+ .cio_iter_init = vvp_io_read_iter_init,
.cio_lock = vvp_io_read_lock,
.cio_start = vvp_io_read_start,
.cio_end = vvp_io_rw_end,
[CIT_LADVISE] = {
.cio_fini = vvp_io_fini
},
+ [CIT_LSEEK] = {
+ .cio_fini = vvp_io_fini,
+ .cio_lock = vvp_io_lseek_lock,
+ .cio_start = vvp_io_lseek_start,
+ .cio_end = vvp_io_lseek_end,
+ },
},
.cio_read_ahead = vvp_io_read_ahead
};