*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LLITE
-
#include <obd.h>
+#include <linux/pagevec.h>
+#include <linux/memcontrol.h>
+
+#include "llite_internal.h"
#include "vvp_internal.h"
+#include <libcfs/linux/linux-misc.h>
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice);
+ const struct cl_io_slice *slice)
+{
+ struct vvp_io *vio;
+
+ vio = container_of(slice, struct vvp_io, vui_cl);
+ LASSERT(vio == vvp_env_io(env));
+
+ return vio;
+}
/**
 * True if \a io is a normal io, false for splice_{read,write}.
*/
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
- struct vvp_io *vio = vvp_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- return vio->cui_io_subtype == IO_NORMAL;
+ return vio->vui_io_subtype == IO_NORMAL;
}
/**
struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
bool rc = true;
switch (io->ci_type) {
case CIT_WRITE:
		/* No lock is needed here to check lli_layout_gen: we hold the
		 * extent lock, and the GROUP lock must be held to swap the
		 * layout. */
- if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
+ if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
io->ci_need_restart = 1;
- /* this will return application a short read/write */
+ /* this will cause a short read/write */
io->ci_continue = 0;
rc = false;
}
return rc;
}
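+
+/* Take the inode size lock before the cl_object attribute lock; the
+ * unlock helper releases the two in the reverse order. */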
+static void vvp_object_size_lock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ ll_inode_size_lock(inode);
+ cl_object_attr_lock(obj);
+}
+
+static void vvp_object_size_unlock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ cl_object_attr_unlock(obj);
+ ll_inode_size_unlock(inode);
+}
+
+/**
+ * Helper function that adjusts the file size (inode->i_size), if necessary,
+ * when the byte at offset \a start + \a count - 1 is accessed. The file size
+ * can be arbitrarily stale on a Lustre client, but the client at least knows
+ * the KMS (known minimum size). If the accessed area is inside [0, KMS], set
+ * the file size to KMS; otherwise glimpse the file size.
+ *
+ * Locking: i_size_lock is used to serialize changes to inode size and to
+ * protect consistency between inode size and cl_object
+ * attributes. cl_object_attr_lock() protects consistency between cl_attr's of
+ * top-object and sub-objects.
+ */
+static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, loff_t start, size_t count,
+ int *exceed)
+{
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ struct inode *inode = vvp_object_inode(obj);
+ loff_t pos = start + count - 1;
+ loff_t kms;
+ int result;
+
+ /*
+ * Consistency guarantees: following possibilities exist for the
+ * relation between region being accessed and real file size at this
+ * moment:
+ *
+ * (A): the region is completely inside of the file;
+ *
+ * (B-x): x bytes of region are inside of the file, the rest is
+ * outside;
+ *
+ * (C): the region is completely outside of the file.
+ *
+ * This classification is stable under the DLM lock already acquired
+ * by the caller, because to change the class another client would
+ * have to take a DLM lock conflicting with ours. Also, any updates
+ * to ->i_size
+ * by other threads on this client are serialized by
+ * ll_inode_size_lock(). This guarantees that short reads are handled
+ * correctly in the face of concurrent writes and truncates.
+ */
+ vvp_object_size_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ kms = attr->cat_kms;
+ if (pos > kms) {
+ /*
+ * A glimpse is necessary to determine whether we
+ * return a short read (B) or some zeroes at the end
+ * of the buffer (C)
+ */
+ vvp_object_size_unlock(obj);
+ result = cl_glimpse_lock(env, io, inode, obj, 0);
+ if (result == 0 && exceed != NULL) {
+ /* If the target page index exceeds the end-of-file
+ * page index, return directly; do not expect the
+ * kernel to handle such a case correctly
+ * (linux-2.6.18-128.1.1 missed doing that).
+ * --bug 17336 */
+ loff_t size = i_size_read(inode);
+ unsigned long cur_index = start >>
+ PAGE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ (((size - 1) >> PAGE_SHIFT) <
+ cur_index))
+ *exceed = 1;
+ }
+
+ return result;
+ } else {
+ /*
+ * region is within kms and, hence, within real file
+ * size (A). We need to increase i_size to cover the
+ * read region so that generic_file_read() will do its
+ * job, but that doesn't mean the kms size is
+ * _correct_, it is only the _minimum_ size. If
+ * someone does a stat they will get the correct size
+ * which will always be >= the kms value here.
+ * b=11081
+ */
+ if (i_size_read(inode) < kms) {
+ i_size_write(inode, kms);
+ CDEBUG(D_VFSTRACE,
+ DFID" updating i_size %llu\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (__u64)i_size_read(inode));
+ }
+ }
+ }
+
+ vvp_object_size_unlock(obj);
+
+ return result;
+}
+
/*****************************************************************************
*
* io operations.
*
*/
+static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ pgoff_t start, pgoff_t end)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
+ struct cl_object *obj = io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+ ENTRY;
+
+ CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
+
+ memset(&vio->vui_link, 0, sizeof vio->vui_link);
+
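+ /* A group-locked file maps this I/O onto the existing group lock
+ * (matched by gid) instead of enqueueing a new extent lock. */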
+ if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ descr->cld_mode = CLM_GROUP;
+ descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
+ enqflags |= CEF_LOCK_MATCH;
+ } else {
+ descr->cld_mode = mode;
+ }
+
+ descr->cld_obj = obj;
+ descr->cld_start = start;
+ descr->cld_end = end;
+ descr->cld_enq_flags = enqflags;
+
+ cl_io_lock_add(env, io, &vio->vui_link);
+
+ RETURN(0);
+}
+
+static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ loff_t start, loff_t end)
+{
+ struct cl_object *obj = io->ci_obj;
+
+ return vvp_io_one_lock_index(env, io, enqflags, mode,
+ cl_index(obj, start), cl_index(obj, end));
+}
+
static int vvp_io_write_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
- cl_page_list_init(&cio->u.write.cui_queue);
- cio->u.write.cui_written = 0;
- cio->u.write.cui_from = 0;
- cio->u.write.cui_to = PAGE_SIZE;
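+ /* vui_from/vui_to delimit the valid bytes within the first and last
+ * pages of the queued write; initially a whole first page. */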
+ cl_page_list_init(&vio->u.write.vui_queue);
+ vio->u.write.vui_written = 0;
+ vio->u.write.vui_from = 0;
+ vio->u.write.vui_to = PAGE_SIZE;
return 0;
}
static void vvp_io_write_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
- LASSERT(cio->u.write.cui_queue.pl_nr == 0);
+ LASSERT(vio->u.write.vui_queue.pl_nr == 0);
}
static int vvp_io_fault_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+
+ LASSERT(inode == file_inode(vio->vui_fd->fd_file));
- LASSERT(inode ==
- cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
- vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
- return 0;
+ return 0;
}
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct inode *inode = ccc_object_inode(obj);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct inode *inode = vvp_object_inode(obj);
+ __u32 gen = 0;
+ int rc;
+ ENTRY;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
- "restore needed %d\n",
+ "need write layout %d, restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
-
- if (io->ci_restore_needed == 1) {
- int rc;
+ vio->vui_layout_gen, io->ci_need_write_intent,
+ io->ci_restore_needed);
+ if (io->ci_restore_needed) {
		/* the file was detected released; we need to restore it
		 * before finishing the io
		 */
		rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
/* if restore registration failed, no restart,
* we will return -ENODATA */
/* The layout will change after restore, so we need to
- * block on layout lock hold by the MDT
+ * block on layout lock held by the MDT
* as MDT will not send new layout in lvb (see LU-3124)
* we have to explicitly fetch it, all this will be done
- * by ll_layout_refresh()
+ * by ll_layout_refresh().
+ * Even if ll_layout_restore() returns zero, it doesn't mean
+ * that restore has been successful. Therefore it sets
+ * ci_verify_layout so that it will check layout at the end
+ * of this function.
*/
- if (rc == 0) {
- io->ci_restore_needed = 0;
- io->ci_need_restart = 1;
- io->ci_verify_layout = 1;
- } else {
+ if (rc) {
io->ci_restore_needed = 1;
io->ci_need_restart = 0;
io->ci_verify_layout = 0;
io->ci_result = rc;
+ GOTO(out, rc);
}
- }
- if (!io->ci_ignore_layout && io->ci_verify_layout) {
- __u32 gen = 0;
+ io->ci_restore_needed = 0;
- /* check layout version */
+ /* Even if ll_layout_restore() returns zero, it doesn't mean
+ * that restore has been successful. Therefore we must verify
+ * whether the layout changed and restart the I/O accordingly.
+ */
ll_layout_refresh(inode, &gen);
- io->ci_need_restart = cio->cui_layout_gen != gen;
+ io->ci_need_restart = vio->vui_layout_gen != gen;
if (io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
DFID" layout changed from %d to %d.\n",
PFID(lu_object_fid(&obj->co_lu)),
- cio->cui_layout_gen, gen);
+ vio->vui_layout_gen, gen);
/* today successful restore is the only possible
* case */
/* restore was done, clear restoring state */
- ll_i2info(ccc_object_inode(obj))->lli_flags &=
- ~LLIF_FILE_RESTORING;
+ ll_file_clear_flag(ll_i2info(vvp_object_inode(obj)),
+ LLIF_FILE_RESTORING);
}
+ GOTO(out, 0);
}
+
+ /* A dynamic layout change is needed; send a layout intent RPC. */
+ if (io->ci_need_write_intent) {
+ enum layout_intent_opc opc = LAYOUT_INTENT_WRITE;
+
+ io->ci_need_write_intent = 0;
+
+ LASSERT(io->ci_type == CIT_WRITE ||
+ cl_io_is_trunc(io) || cl_io_is_mkwrite(io));
+
+ CDEBUG(D_VFSTRACE, DFID" write layout, type %u "DEXT"\n",
+ PFID(lu_object_fid(&obj->co_lu)), io->ci_type,
+ PEXT(&io->ci_write_intent));
+
+ if (cl_io_is_trunc(io))
+ opc = LAYOUT_INTENT_TRUNC;
+
+ rc = ll_layout_write_intent(inode, opc, &io->ci_write_intent);
+ io->ci_result = rc;
+ if (!rc)
+ io->ci_need_restart = 1;
+ GOTO(out, rc);
+ }
+
+ if (!io->ci_need_restart &&
+ !io->ci_ignore_layout && io->ci_verify_layout) {
+ /* check layout version */
+ ll_layout_refresh(inode, &gen);
+ io->ci_need_restart = vio->vui_layout_gen != gen;
+ if (io->ci_need_restart) {
+ CDEBUG(D_VFSTRACE,
+ DFID" layout changed from %d to %d.\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ vio->vui_layout_gen, gen);
+ }
+ GOTO(out, 0);
+ }
+out:
+ EXIT;
}
static void vvp_io_fault_fini(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_page *page = io->u.ci_fault.ft_page;
- CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
+ CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
if (page != NULL) {
lu_ref_del(&page->cp_reference, "fault", io);
}
static int vvp_mmap_locks(const struct lu_env *env,
- struct ccc_io *vio, struct cl_io *io)
+ struct vvp_io *vio, struct cl_io *io)
{
- struct ccc_thread_info *cti = ccc_env_info(env);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- struct cl_lock_descr *descr = &cti->cti_descr;
- ldlm_policy_data_t policy;
- unsigned long addr;
- unsigned long seg;
- ssize_t count;
- int result = 0;
- ENTRY;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- if (!cl_is_normalio(env, io))
- RETURN(0);
-
- if (vio->cui_iov == NULL) /* nfs or loop back device write */
- RETURN(0);
-
- /* No MM (e.g. NFS)? No vmas too. */
- if (mm == NULL)
- RETURN(0);
-
- for (seg = 0; seg < vio->cui_nrsegs; seg++) {
- const struct iovec *iv = &vio->cui_iov[seg];
-
- addr = (unsigned long)iv->iov_base;
- count = iv->iov_len;
+ struct vvp_thread_info *vti = vvp_env_info(env);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct cl_lock_descr *descr = &vti->vti_descr;
+ union ldlm_policy_data policy;
+ struct iovec iov;
+ struct iov_iter i;
+ unsigned long addr;
+ ssize_t count;
+ int result = 0;
+ ENTRY;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ if (!cl_is_normalio(env, io))
+ RETURN(0);
+
+ /* NFS or loopback device write */
+ if (vio->vui_iter == NULL)
+ RETURN(0);
+
+ /* No MM (e.g. NFS)? No vmas too. */
+ if (mm == NULL)
+ RETURN(0);
+
+ if (!iter_is_iovec(vio->vui_iter) && !iov_iter_is_kvec(vio->vui_iter))
+ RETURN(0);
+
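+ /* Walk a local copy of the iterator one iovec at a time so that the
+ * caller's position in vui_iter is left untouched. */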
+ for (i = *vio->vui_iter;
+ iov_iter_count(&i);
+ iov_iter_advance(&i, iov.iov_len)) {
+ iov = iov_iter_iovec(&i);
+ addr = (unsigned long)iov.iov_base;
+ count = iov.iov_len;
+
if (count == 0)
continue;
- count += addr & (~CFS_PAGE_MASK);
- addr &= CFS_PAGE_MASK;
+ count += addr & ~PAGE_MASK;
+ addr &= PAGE_MASK;
down_read(&mm->mmap_sem);
while((vma = our_vma(mm, addr, count)) != NULL) {
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct dentry *de = file_dentry(vma->vm_file);
+ struct inode *inode = de->d_inode;
int flags = CEF_MUST;
if (ll_file_nolock(vma->vm_file)) {
RETURN(result);
}
+static void vvp_io_advance(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ size_t nob)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = ios->cis_io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ if (!cl_is_normalio(env, io))
+ return;
+
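+ /* vvp_io_update_iov() truncated the iterator to the current chunk;
+ * shrink the total and re-expand to the bytes still outstanding. */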
+ vio->vui_tot_count -= nob;
+ iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
+}
+
+static void vvp_io_update_iov(const struct lu_env *env,
+ struct vvp_io *vio, struct cl_io *io)
+{
+ size_t size = io->u.ci_rw.crw_count;
+
+ if (!cl_is_normalio(env, io) || vio->vui_iter == NULL)
+ return;
+
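+ /* Limit the iterator to the chunk covered by this cl_io; the remainder
+ * is restored by iov_iter_reexpand() in vvp_io_advance(). */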
+ iov_iter_truncate(vio->vui_iter, size);
+}
+
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
enum cl_lock_mode mode, loff_t start, loff_t end)
{
- struct ccc_io *cio = ccc_env_io(env);
- int result;
- int ast_flags = 0;
+ struct vvp_io *vio = vvp_env_io(env);
+ int result;
+ int ast_flags = 0;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+ ENTRY;
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ENTRY;
+ vvp_io_update_iov(env, vio, io);
+
+ if (io->u.ci_rw.crw_nonblock)
+ ast_flags |= CEF_NONBLOCK;
+ if (io->ci_lock_no_expand)
+ ast_flags |= CEF_LOCK_NO_EXPAND;
+ if (vio->vui_fd) {
+ /* Holding the group lock means no lockless I/O any more */
+ if (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ io->ci_ignore_lockless = 1;
+
+ if (ll_file_nolock(vio->vui_fd->fd_file) ||
+ (vio->vui_fd->fd_flags & LL_FILE_LOCKLESS_IO &&
+ !io->ci_ignore_lockless))
+ ast_flags |= CEF_NEVER;
+ }
- ccc_io_update_iov(env, cio, io);
+ result = vvp_mmap_locks(env, vio, io);
+ if (result == 0)
+ result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
- if (io->u.ci_rw.crw_nonblock)
- ast_flags |= CEF_NONBLOCK;
- result = vvp_mmap_locks(env, cio, io);
- if (result == 0)
- result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
- RETURN(result);
+ RETURN(result);
}
static int vvp_io_read_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
- struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
+ struct cl_io *io = ios->cis_io;
+ struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
int result;
ENTRY;
/*
* XXX LDLM_FL_CBPENDING
*/
- return ccc_io_one_lock_index
- (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
- io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
+ return vvp_io_one_lock_index(env,
+ io, 0,
+ vvp_mode_from_vma(vio->u.fault.ft_vma),
+ io->u.ci_fault.ft_index,
+ io->u.ci_fault.ft_index);
}
static int vvp_io_write_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
- loff_t start;
- loff_t end;
-
- if (io->u.ci_wr.wr_append) {
- start = 0;
- end = OBD_OBJECT_EOF;
- } else {
- start = io->u.ci_wr.wr.crw_pos;
- end = start + io->u.ci_wr.wr.crw_count - 1;
- }
- return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
+ struct cl_io *io = ios->cis_io;
+ loff_t start;
+ loff_t end;
+
+ if (io->u.ci_wr.wr_append) {
+ start = 0;
+ end = OBD_OBJECT_EOF;
+ } else {
+ start = io->u.ci_wr.wr.crw_pos;
+ end = start + io->u.ci_wr.wr.crw_count - 1;
+ }
+
+ RETURN(vvp_io_rw_lock(env, io, CLM_WRITE, start, end));
}
static int vvp_io_setattr_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
return 0;
}
static int vvp_io_setattr_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
struct cl_io *io = ios->cis_io;
- __u64 new_size;
+ __u64 lock_start = 0;
+ __u64 lock_end = OBD_OBJECT_EOF;
__u32 enqflags = 0;
- if (cl_io_is_trunc(io)) {
- new_size = io->u.ci_setattr.sa_attr.lvb_size;
- if (new_size == 0)
- enqflags = CEF_DISCARD_DATA;
- } else {
- if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
- io->u.ci_setattr.sa_attr.lvb_ctime) ||
- (io->u.ci_setattr.sa_attr.lvb_atime >=
- io->u.ci_setattr.sa_attr.lvb_ctime))
- return 0;
- new_size = 0;
- }
- cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
- return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
- new_size, OBD_OBJECT_EOF);
+ if (cl_io_is_trunc(io)) {
+ if (io->u.ci_setattr.sa_attr.lvb_size == 0)
+ enqflags = CEF_DISCARD_DATA;
+ } else if (cl_io_is_fallocate(io)) {
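+ /* fallocate: only the affected extent needs to be locked */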
+ lock_start = io->u.ci_setattr.sa_falloc_offset;
+ lock_end = lock_start + io->u.ci_setattr.sa_attr.lvb_size;
+ } else {
+ unsigned int valid = io->u.ci_setattr.sa_avalid;
+
+ if (!(valid & TIMES_SET_FLAGS))
+ return 0;
+
+ if ((!(valid & ATTR_MTIME) ||
+ io->u.ci_setattr.sa_attr.lvb_mtime >=
+ io->u.ci_setattr.sa_attr.lvb_ctime) &&
+ (!(valid & ATTR_ATIME) ||
+ io->u.ci_setattr.sa_attr.lvb_atime >=
+ io->u.ci_setattr.sa_attr.lvb_ctime))
+ return 0;
+ }
+
+ return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
+ lock_start, lock_end);
}
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
return result;
}
-static int vvp_io_setattr_trunc(const struct lu_env *env,
- const struct cl_io_slice *ios,
- struct inode *inode, loff_t size)
-{
- inode_dio_wait(inode);
- return 0;
-}
-
static int vvp_io_setattr_time(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct cl_attr *attr = vvp_env_thread_attr(env);
int result;
unsigned valid = CAT_CTIME;
- cl_object_attr_lock(obj);
- attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
- if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
- attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
- valid |= CAT_ATIME;
- }
- if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
- attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
- valid |= CAT_MTIME;
- }
- result = cl_object_attr_set(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
+ cl_object_attr_lock(obj);
+ attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
+ if (io->u.ci_setattr.sa_avalid & ATTR_ATIME_SET) {
+ attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
+ valid |= CAT_ATIME;
+ }
+ if (io->u.ci_setattr.sa_avalid & ATTR_MTIME_SET) {
+ attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
+ valid |= CAT_MTIME;
+ }
+ result = cl_object_attr_update(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
- return result;
+ return result;
}
static int vvp_io_setattr_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
- int result = 0;
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = vvp_object_inode(io->ci_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
- mutex_lock(&inode->i_mutex);
- if (cl_io_is_trunc(io))
- result = vvp_io_setattr_trunc(env, ios, inode,
- io->u.ci_setattr.sa_attr.lvb_size);
- if (result == 0)
- result = vvp_io_setattr_time(env, ios);
- return result;
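+ /* Truncate must serialize against in-flight I/O: take lli_trunc_sem
+ * for write and drain direct I/O before changing the size. */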
+ if (cl_io_is_trunc(io)) {
+ trunc_sem_down_write(&lli->lli_trunc_sem);
+ inode_lock(inode);
+ inode_dio_wait(inode);
+ } else if (cl_io_is_fallocate(io)) {
+ inode_lock(inode);
+ inode_dio_wait(inode);
+ } else {
+ inode_lock(inode);
+ }
+
+ if (io->u.ci_setattr.sa_avalid & TIMES_SET_FLAGS)
+ return vvp_io_setattr_time(env, ios);
+
+ return 0;
}
static void vvp_io_setattr_end(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = vvp_object_inode(io->ci_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
if (cl_io_is_trunc(io)) {
/* Truncate in memory pages - they must be clean pages
* because osc has already notified to destroy osc_extents. */
vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
- inode_dio_write_done(inode);
+ inode_unlock(inode);
+ trunc_sem_up_write(&lli->lli_trunc_sem);
+ } else if (cl_io_is_fallocate(io)) {
+ inode_unlock(inode);
+ } else {
+ inode_unlock(inode);
}
- mutex_unlock(&inode->i_mutex);
}
static void vvp_io_setattr_fini(const struct lu_env *env,
const struct cl_io_slice *ios)
{
+ bool restore_needed = ios->cis_io->ci_restore_needed;
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+
vvp_io_fini(env, ios);
+
+ if (restore_needed && !ios->cis_io->ci_restore_needed) {
+ /* restore finished, set data modified flag for HSM */
+ ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
+ }
}
static int vvp_io_read_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_ra_read *bead = &vio->cui_bead;
- struct file *file = cio->cui_fd->fd_file;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct file *file = vio->vui_fd->fd_file;
+ loff_t pos = io->u.ci_rd.rd.crw_pos;
+ size_t cnt = io->u.ci_rd.rd.crw_count;
+ size_t tot = vio->vui_tot_count;
+ int exceed = 0;
+ int result;
+ ENTRY;
- int result;
- loff_t pos = io->u.ci_rd.rd.crw_pos;
- long cnt = io->u.ci_rd.rd.crw_count;
- long tot = cio->cui_tot_count;
- int exceed = 0;
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CDEBUG(D_VFSTRACE, "%s: read [%llu, %llu)\n",
+ file_dentry(file)->d_name.name,
+ pos, pos + cnt);
- CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
+ if (vio->vui_io_subtype == IO_NORMAL)
+ trunc_sem_down_read(&lli->lli_trunc_sem);
+
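+ /* For async readahead there is nothing to read synchronously here;
+ * just mark the file accessed. */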
+ if (io->ci_async_readahead) {
+ file_accessed(file);
+ RETURN(0);
+ }
if (!can_populate_pages(env, io, inode))
- return 0;
+ RETURN(0);
- result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
- if (result != 0)
- return result;
- else if (exceed != 0)
- goto out;
-
- LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
- "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
- inode->i_ino, cnt, pos, i_size_read(inode));
-
- /* turn off the kernel's read-ahead */
- cio->cui_fd->fd_file->f_ra.ra_pages = 0;
-
- /* initialize read-ahead window once per syscall */
- if (!vio->cui_ra_window_set) {
- vio->cui_ra_window_set = 1;
- bead->lrr_start = cl_index(obj, pos);
- bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
- ll_ra_read_in(file, bead);
- }
+ /* Unless this is reading a sparse file, the lock has already been
+ * acquired, so vvp_prep_size() is an empty op. */
+ result = vvp_prep_size(env, obj, io, pos, cnt, &exceed);
+ if (result != 0)
+ RETURN(result);
+ else if (exceed != 0)
+ GOTO(out, result);
+
+ LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
+ "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
+ inode->i_ino, cnt, pos, i_size_read(inode));
+
+ /* turn off the kernel's read-ahead */
+ vio->vui_fd->fd_file->f_ra.ra_pages = 0;
+
+ /* initialize read-ahead window once per syscall */
+ if (!vio->vui_ra_valid) {
+ vio->vui_ra_valid = true;
+ vio->vui_ra_start_idx = cl_index(obj, pos);
+ vio->vui_ra_pages = cl_index(obj, tot + PAGE_SIZE - 1);
+ /* If both start and end are unaligned, we read one more page
+ * than the index math suggests. */
+ if ((pos & ~PAGE_MASK) != 0 && ((pos + tot) & ~PAGE_MASK) != 0)
+ vio->vui_ra_pages++;
+
+ CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
+ tot, vio->vui_ra_start_idx, vio->vui_ra_pages);
+ }
- /* BUG: 5972 */
- file_accessed(file);
- switch (vio->cui_io_subtype) {
- case IO_NORMAL:
- LASSERT(cio->cui_iocb->ki_pos == pos);
- result = generic_file_aio_read(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- cio->cui_iocb->ki_pos);
+ /* BUG: 5972 */
+ file_accessed(file);
+ switch (vio->vui_io_subtype) {
+ case IO_NORMAL:
+ LASSERT(vio->vui_iocb->ki_pos == pos);
+ result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
break;
- case IO_SPLICE:
- result = generic_file_splice_read(file, &pos,
- vio->u.splice.cui_pipe, cnt,
- vio->u.splice.cui_flags);
- /* LU-1109: do splice read stripe by stripe otherwise if it
- * may make nfsd stuck if this read occupied all internal pipe
- * buffers. */
- io->ci_continue = 0;
- break;
- default:
- CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
- LBUG();
- }
+ case IO_SPLICE:
+ result = generic_file_splice_read(file, &pos,
+ vio->u.splice.vui_pipe, cnt,
+ vio->u.splice.vui_flags);
+ /* LU-1109: do splice read stripe by stripe, otherwise it
+ * may make nfsd stuck if this read occupies all internal
+ * pipe buffers. */
+ io->ci_continue = 0;
+ break;
+ default:
+ CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
+ LBUG();
+ }
+ GOTO(out, result);
out:
if (result >= 0) {
if (result < cnt)
io->ci_continue = 0;
io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd,
- pos, result, READ);
result = 0;
}
return result;
}
-static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
-
- if (vio->cui_ra_window_set)
- ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
-
- vvp_io_fini(env, ios);
-}
-
static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *plist, int from, int to)
{
RETURN(bytes > 0 ? bytes : rc);
}
+/*
+ * Kernels 4.2 - 4.5 pass memcg argument to account_page_dirtied()
+ * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
+ */
+static inline void ll_account_page_dirtied(struct page *page,
+ struct address_space *mapping)
+{
+#ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
+ struct mem_cgroup *memcg = mem_cgroup_begin_page_stat(page);
+
+ account_page_dirtied(page, mapping, memcg);
+ mem_cgroup_end_page_stat(memcg);
+#elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT)
+ account_page_dirtied(page, mapping);
+#else
+ vvp_account_page_dirtied(page, mapping);
+#endif
+}
+
+/*
+ * From kernel v4.19-rc5-248-g9b89a0355144 use XArray
+ * Prior kernels use radix_tree for tags
+ */
+static inline void ll_page_tag_dirty(struct page *page,
+ struct address_space *mapping)
+{
+#ifndef HAVE_RADIX_TREE_TAG_SET
+ __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
+#else
+ radix_tree_tag_set(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
+#endif
+}
+
+/* Taken from kernel set_page_dirty, __set_page_dirty_nobuffers
+ * Last change to this area: b93b016313b3ba8003c3b8bb71f569af91f19fc7
+ *
+ * Current with Linus tip of tree (7/13/2019):
+ * v5.2-rc4-224-ge01e060fe0
+ *
+ * Backwards compat for 3.x, 5.x kernels relating to memcg handling
+ * & rename of radix tree to xarray.
+ */
+void vvp_set_pagevec_dirty(struct pagevec *pvec)
+{
+ struct page *page = pvec->pages[0];
+ struct address_space *mapping = page->mapping;
+ unsigned long flags;
+ int count = pagevec_count(pvec);
+ int dirtied = 0;
+ int i = 0;
+
+ ENTRY;
+
+ /* From set_page_dirty */
+ for (i = 0; i < count; i++)
+ ClearPageReclaim(pvec->pages[i]);
+
+ LASSERTF(page->mapping,
+ "mapping must be set. page %p, page->private (cl_page) %p\n",
+ page, (void *) page->private);
+
+ /* Rest of code derived from __set_page_dirty_nobuffers */
+ ll_xa_lock_irqsave(&mapping->i_pages, flags);
+
+ /* Notes on differences with __set_page_dirty_nobuffers:
+ * 1. We don't need to call page_mapping because we know this is a page
+ * cache page.
+ * 2. We have the pages locked, so there is no need for the careful
+ * mapping/mapping2 dance.
+ * 3. A NULL mapping is impossible. (The race with truncate mentioned
+ * in __set_page_dirty_nobuffers cannot happen because we hold the
+ * page lock.)
+ * 4. All mappings are the same because i/o is only to one file.
+ * 5. We invert the lock order on lock_page_memcg(page) and the mapping
+ * xa_lock, but this is the only function that should use that pair of
+ * locks and it can't race because Lustre locks pages throughout i/o.
+ */
+ for (i = 0; i < count; i++) {
+ page = pvec->pages[i];
+ lock_page_memcg(page);
+ if (TestSetPageDirty(page)) {
+ unlock_page_memcg(page);
+ continue;
+ }
+ LASSERTF(page->mapping == mapping,
+ "all pages must have the same mapping. page %p, mapping %p, first mapping %p\n",
+ page, page->mapping, mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+ ll_account_page_dirtied(page, mapping);
+ ll_page_tag_dirty(page, mapping);
+ dirtied++;
+ unlock_page_memcg(page);
+ }
+ ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
+
+ CDEBUG(D_VFSTRACE, "mapping %p, count %d, dirtied %d\n", mapping,
+ count, dirtied);
+
+ if (mapping->host && dirtied) {
+ /* !PageAnon && !swapper_space */
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ }
+
+ EXIT;
+}
+
static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+ struct pagevec *pvec)
{
- struct ccc_page *cp;
- struct page *vmpage = page->cp_vmpage;
- struct cl_object *clob = cl_io_top(io)->ci_obj;
+ int count = 0;
+ int i = 0;
+
+ ENTRY;
- SetPageUptodate(vmpage);
- set_page_dirty(vmpage);
+ count = pagevec_count(pvec);
+ LASSERT(count > 0);
- cp = cl2ccc_page(cl_object_page_slice(clob, page));
- vvp_write_pending(cl2ccc(clob), cp);
+ for (i = 0; i < count; i++) {
+ struct page *vmpage = pvec->pages[i];
+ SetPageUptodate(vmpage);
+ }
+
+ vvp_set_pagevec_dirty(pvec);
- cl_page_disown(env, io, page);
+ for (i = 0; i < count; i++) {
+ struct page *vmpage = pvec->pages[i];
+ struct cl_page *page = (struct cl_page *) vmpage->private;
+ cl_page_disown(env, io, page);
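+ /* page reference was taken in ll_cl_init() */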
+ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+ cl_page_put(env, page);
+ }
- /* held in ll_cl_init() */
- lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
- cl_page_put(env, page);
+ EXIT;
}
/* make sure the page list is contiguous */
pgoff_t index = CL_PAGE_EOF;
cl_page_list_for_each(page, plist) {
- struct ccc_page *cp = cl_object_page_slice(obj, page);
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
if (index == CL_PAGE_EOF) {
- index = ccc_index(cp);
+ index = vvp_index(vpg);
continue;
}
++index;
- if (index == ccc_index(cp))
+ if (index == vvp_index(vpg))
continue;
return false;
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
{
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_page_list *queue = &cio->u.write.cui_queue;
+ struct inode *inode = vvp_object_inode(obj);
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *queue = &vio->u.write.vui_queue;
struct cl_page *page;
int rc = 0;
int bytes = 0;
- unsigned int npages = cio->u.write.cui_queue.pl_nr;
+ unsigned int npages = vio->u.write.vui_queue.pl_nr;
ENTRY;
if (npages == 0)
RETURN(0);
CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
- npages, cio->u.write.cui_from, cio->u.write.cui_to);
+ npages, vio->u.write.vui_from, vio->u.write.vui_to);
LASSERT(page_list_sanity_check(obj, queue));
/* submit IO with async write */
rc = cl_io_commit_async(env, io, queue,
- cio->u.write.cui_from, cio->u.write.cui_to,
+ vio->u.write.vui_from, vio->u.write.vui_to,
write_commit_callback);
npages -= queue->pl_nr; /* already committed pages */
if (npages > 0) {
bytes = npages << PAGE_SHIFT;
/* first page */
- bytes -= cio->u.write.cui_from;
+ bytes -= vio->u.write.vui_from;
if (queue->pl_nr == 0) /* last page */
- bytes -= PAGE_SIZE - cio->u.write.cui_to;
+ bytes -= PAGE_SIZE - vio->u.write.vui_to;
LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
- cio->u.write.cui_written += bytes;
+ vio->u.write.vui_written += bytes;
CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
- npages, bytes, cio->u.write.cui_written);
+ npages, bytes, vio->u.write.vui_written);
/* the first page must have been written. */
- cio->u.write.cui_from = 0;
+ vio->u.write.vui_from = 0;
}
LASSERT(page_list_sanity_check(obj, queue));
LASSERT(ergo(rc == 0, queue->pl_nr == 0));
/* out of quota, try sync write */
if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
rc = vvp_io_commit_sync(env, io, queue,
- cio->u.write.cui_from,
- cio->u.write.cui_to);
+ vio->u.write.vui_from,
+ vio->u.write.vui_to);
if (rc > 0) {
- cio->u.write.cui_written += rc;
+ vio->u.write.vui_written += rc;
rc = 0;
}
}
static int vvp_io_write_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- ssize_t result = 0;
- loff_t pos = io->u.ci_wr.wr.crw_pos;
- size_t cnt = io->u.ci_wr.wr.crw_count;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct file *file = vio->vui_fd->fd_file;
+ ssize_t result = 0;
+ loff_t pos = io->u.ci_wr.wr.crw_pos;
+ size_t cnt = io->u.ci_wr.wr.crw_count;
+ bool lock_inode = !IS_NOSEC(inode);
+ size_t nob = io->ci_nob;
+ struct iov_iter iter;
+ size_t written = 0;
+
+ ENTRY;
- ENTRY;
+ if (vio->vui_io_subtype == IO_NORMAL)
+ trunc_sem_down_read(&lli->lli_trunc_sem);
if (!can_populate_pages(env, io, inode))
RETURN(0);
- if (cl_io_is_append(io)) {
- /*
- * PARALLEL IO This has to be changed for parallel IO doing
- * out-of-order writes.
- */
+ if (cl_io_is_append(io)) {
+ /*
+ * PARALLEL IO This has to be changed for parallel IO doing
+ * out-of-order writes.
+ */
ll_merge_attr(env, inode);
- pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
- cio->cui_iocb->ki_pos = pos;
- } else {
- LASSERT(cio->cui_iocb->ki_pos == pos);
+ pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+ vio->vui_iocb->ki_pos = pos;
+ } else {
+ LASSERTF(vio->vui_iocb->ki_pos == pos,
+ "ki_pos %lld [%lld, %lld)\n",
+ vio->vui_iocb->ki_pos,
+ pos, pos + cnt);
}
- CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+ CDEBUG(D_VFSTRACE, "%s: write [%llu, %llu)\n",
+ file_dentry(file)->d_name.name,
+ pos, pos + cnt);
+
+ /* The maximum Lustre file size is variable, based on the OST maximum
+ * object size and number of stripes. This needs another check in
+ * addition to the VFS checks earlier. */
+ if (pos + cnt > ll_file_maxbytes(inode)) {
+ CDEBUG(D_INODE,
+ "%s: file %s ("DFID") offset %llu > maxbytes %llu\n",
+ ll_i2sbi(inode)->ll_fsname,
+ file_dentry(file)->d_name.name,
+ PFID(ll_inode2fid(inode)), pos + cnt,
+ ll_file_maxbytes(inode));
+ RETURN(-EFBIG);
+ }
+
+ /* Tests to verify we take the i_mutex correctly */
+ if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_SEC) && !lock_inode)
+ RETURN(-EINVAL);
- if (cio->cui_iov == NULL) {
+ if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_NOSEC) && lock_inode)
+ RETURN(-EINVAL);
+
+ if (vio->vui_iter == NULL) {
/* from a temp io in ll_cl_init(). */
result = 0;
} else {
* consistency, proper locking to protect against writes,
	 * truncates, etc. is handled in the higher layers of lustre.
*/
- result = __generic_file_aio_write(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- &cio->cui_iocb->ki_pos);
- if (result > 0 || result == -EIOCBQUEUED) {
+ lock_inode = !IS_NOSEC(inode);
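+ /* Snapshot the iterator so that, after a short commit, ki_pos and
+ * vui_iter can be rewound to the committed position. */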
+ iter = *vio->vui_iter;
+
+ if (unlikely(lock_inode))
+ inode_lock(inode);
+ result = __generic_file_write_iter(vio->vui_iocb,
+ vio->vui_iter);
+ if (unlikely(lock_inode))
+ inode_unlock(inode);
+
+ written = result;
+ if (result > 0)
+#ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
+ result = generic_write_sync(vio->vui_iocb, result);
+#else
+ {
ssize_t err;
- err = generic_write_sync(cio->cui_iocb->ki_filp,
- pos, result);
+ err = generic_write_sync(vio->vui_iocb->ki_filp, pos,
+ result);
if (err < 0 && result > 0)
result = err;
}
-
+#endif
}
+
if (result > 0) {
result = vvp_io_write_commit(env, io);
- if (cio->u.write.cui_written > 0) {
- result = cio->u.write.cui_written;
- io->ci_nob += result;
-
- CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
+ /* Simulate short commit */
+ if (CFS_FAULT_CHECK(OBD_FAIL_LLITE_SHORT_COMMIT)) {
+ vio->u.write.vui_written >>= 1;
+ if (vio->u.write.vui_written > 0)
+ io->ci_need_restart = 1;
+ }
+ if (vio->u.write.vui_written > 0) {
+ result = vio->u.write.vui_written;
+ CDEBUG(D_VFSTRACE, "%s: write nob %zd, result: %zd\n",
+ file_dentry(file)->d_name.name,
io->ci_nob, result);
+ io->ci_nob += result;
+ } else {
+ io->ci_continue = 0;
}
}
- if (result > 0) {
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
+ if (vio->vui_iocb->ki_pos != (pos + io->ci_nob - nob)) {
+ CDEBUG(D_VFSTRACE, "%s: write position mismatch: "
+ "ki_pos %lld vs. pos %lld, written %ld, commit %ld "
+ "rc %ld\n",
+ file_dentry(file)->d_name.name,
+ vio->vui_iocb->ki_pos, pos + io->ci_nob - nob,
+ written, io->ci_nob - nob, result);
+ /*
+ * Rewind ki_pos and vui_iter to the position that has
+ * been successfully committed.
+ */
+ vio->vui_iocb->ki_pos = pos + io->ci_nob - nob;
+ iov_iter_advance(&iter, io->ci_nob - nob);
+ vio->vui_iter->iov = iter.iov;
+ vio->vui_iter->nr_segs = iter.nr_segs;
+ vio->vui_iter->iov_offset = iter.iov_offset;
+ vio->vui_iter->count = iter.count;
+ }
+ if (result > 0 || result == -EIOCBQUEUED) {
+ ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
if (result < cnt)
io->ci_continue = 0;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, WRITE);
- result = 0;
+ if (result > 0)
+ result = 0;
}
RETURN(result);
}
+static void vvp_io_rw_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ if (vio->vui_io_subtype == IO_NORMAL)
+ trunc_sem_up_read(&lli->lli_trunc_sem);
+}
+
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
- struct vm_fault *vmf = cfio->fault.ft_vmf;
+ struct vm_fault *vmf = cfio->ft_vmf;
- cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
- cfio->fault.ft_flags_valid = 1;
+ cfio->ft_flags = ll_filemap_fault(cfio->ft_vma, vmf);
+ cfio->ft_flags_valid = 1;
- if (vmf->page) {
- LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
- vmf->virtual_address);
- if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
- lock_page(vmf->page);
- cfio->fault.ft_flags |= VM_FAULT_LOCKED;
- }
+ if (vmf->page) {
+ LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
+ get_vmf_address(vmf));
+ if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
+ lock_page(vmf->page);
+ cfio->ft_flags |= VM_FAULT_LOCKED;
+ }
- cfio->ft_vmpage = vmf->page;
- return 0;
- }
+ cfio->ft_vmpage = vmf->page;
- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
- CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
- return -EFAULT;
- }
+ return 0;
+ }
- if (cfio->fault.ft_flags & VM_FAULT_OOM) {
- CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
- return -ENOMEM;
- }
+ if (cfio->ft_flags & VM_FAULT_SIGBUS) {
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", get_vmf_address(vmf));
+ return -EFAULT;
+ }
- if (cfio->fault.ft_flags & VM_FAULT_RETRY)
- return -EAGAIN;
+ if (cfio->ft_flags & VM_FAULT_OOM) {
+ CDEBUG(D_PAGE, "got addr %p - OOM\n", get_vmf_address(vmf));
+ return -ENOMEM;
+ }
+
+ if (cfio->ft_flags & VM_FAULT_RETRY)
+ return -EAGAIN;
+
+ CERROR("unknown error in page fault %d\n", cfio->ft_flags);
- CERROR("unknow error in page fault %d!\n", cfio->fault.ft_flags);
- return -EINVAL;
+ return -EINVAL;
}
static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+ struct pagevec *pvec)
{
- struct ccc_page *cp;
- struct cl_object *clob = cl_io_top(io)->ci_obj;
-
- set_page_dirty(page->cp_vmpage);
-
- cp = cl2ccc_page(cl_object_page_slice(clob, page));
- vvp_write_pending(cl2ccc(clob), cp);
+ vvp_set_pagevec_dirty(pvec);
}
static int vvp_io_fault_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct cl_fault_io *fio = &io->u.ci_fault;
- struct vvp_fault_io *cfio = &vio->u.fault;
- loff_t offset;
- int result = 0;
- struct page *vmpage = NULL;
- struct cl_page *page;
- loff_t size;
- pgoff_t last_index;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_fault_io *fio = &io->u.ci_fault;
+ struct vvp_fault_io *cfio = &vio->u.fault;
+ loff_t offset;
+ int result = 0;
+ struct page *vmpage = NULL;
+ struct cl_page *page;
+ loff_t size;
+ pgoff_t last_index;
ENTRY;
- if (fio->ft_executable &&
- LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
- CWARN("binary "DFID
- " changed while waiting for the page fault lock\n",
- PFID(lu_object_fid(&obj->co_lu)));
+ trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
+ result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
if (result != 0)
RETURN(result);
LASSERT(PageLocked(vmpage));
if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
- ll_invalidate_page(vmpage);
+ generic_error_remove_page(vmpage->mapping, vmpage);
size = i_size_read(inode);
/* Though we have already held a cl_lock upon this page, but
if (fio->ft_mkwrite) {
wait_on_page_writeback(vmpage);
if (!PageDirty(vmpage)) {
- struct cl_page_list *plist = &io->ci_queue.c2_qin;
- struct ccc_page *cp = cl_object_page_slice(obj, page);
+ struct cl_page_list *plist = &vio->u.fault.ft_queue;
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
int to = PAGE_SIZE;
/* vvp_page_assume() calls wait_on_page_writeback(). */
cl_page_list_add(plist, page);
/* size fixup */
- if (last_index == ccc_index(cp))
- to = size & ~CFS_PAGE_MASK;
+ if (last_index == vvp_index(vpg))
+ to = ((size - 1) & ~PAGE_MASK) + 1;
			/* Do not set the Dirty bit here so that in case IO is
			 * started before the page is really made dirty, we
			 * still have a chance to detect it. */
result = cl_io_commit_async(env, io, plist, 0, to,
mkwrite_commit_callback);
+ /* The over-quota flag is set; try a sync write to
+ * check whether we are really out of quota */
+ if (result == -EDQUOT) {
+ cl_page_get(page);
+ result = vvp_io_commit_sync(env, io,
+ plist, 0, to);
+ if (result >= 0) {
+ io->ci_noquota = 1;
+ cl_page_own(env, io, page);
+ cl_page_list_add(plist, page);
+ lu_ref_add(&page->cp_reference,
+ "cl_io", io);
+ result = cl_io_commit_async(env, io,
+ plist, 0, to,
+ mkwrite_commit_callback);
+ io->ci_noquota = 0;
+ } else {
+ cl_page_put(env, page);
+ }
+ }
+
LASSERT(cl_page_is_owned(page, io));
cl_page_list_fini(env, plist);
if (result == -EDQUOT)
result = -ENOSPC;
GOTO(out, result);
- } else
+ } else {
cl_page_disown(env, io, page);
+ }
}
}
/* return unlocked vmpage to avoid deadlocking */
if (vmpage != NULL)
unlock_page(vmpage);
- cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+
+ cfio->ft_flags &= ~VM_FAULT_LOCKED;
+
return result;
}
+static void vvp_io_fault_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ CLOBINVRNT(env, ios->cis_io->ci_obj,
+ vvp_object_invariant(ios->cis_io->ci_obj));
+ trunc_sem_up_read(&lli->lli_trunc_sem);
+}
+
static int vvp_io_fsync_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
return 0;
}
-static int vvp_io_read_page(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice)
+static int vvp_io_read_ahead(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ pgoff_t start, struct cl_read_ahead *ra)
{
- struct cl_io *io = ios->cis_io;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *page = slice->cpl_page;
- struct inode *inode = ccc_object_inode(slice->cpl_obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
- struct ll_readahead_state *ras = &fd->fd_ras;
- struct cl_2queue *queue = &io->ci_queue;
-
+ int result = 0;
ENTRY;
- if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
- sbi->ll_ra_info.ra_max_pages > 0)
- ras_update(sbi, inode, ras, ccc_index(cp),
- cp->cpg_defer_uptodate);
-
- if (cp->cpg_defer_uptodate) {
- cp->cpg_ra_used = 1;
- cl_page_export(env, page, 1);
- }
+ if (ios->cis_io->ci_type == CIT_READ ||
+ ios->cis_io->ci_type == CIT_FAULT) {
+ struct vvp_io *vio = cl2vvp_io(env, ios);
- /*
- * Add page into the queue even when it is marked uptodate above.
- * this will unlock it automatically as part of cl_page_list_disown().
- */
- cl_2queue_add(queue, page);
- if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
- sbi->ll_ra_info.ra_max_pages > 0)
- ll_readahead(env, io, &queue->c2_qin, ras,
- cp->cpg_defer_uptodate);
+ if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ ra->cra_end_idx = CL_PAGE_EOF;
+ result = +1; /* no need to call down */
+ }
+ }
- RETURN(0);
+ RETURN(result);
}
static const struct cl_io_operations vvp_io_ops = {
- .op = {
- [CIT_READ] = {
- .cio_fini = vvp_io_read_fini,
- .cio_lock = vvp_io_read_lock,
- .cio_start = vvp_io_read_start,
- .cio_advance = ccc_io_advance
- },
+ .op = {
+ [CIT_READ] = {
+ .cio_fini = vvp_io_fini,
+ .cio_lock = vvp_io_read_lock,
+ .cio_start = vvp_io_read_start,
+ .cio_end = vvp_io_rw_end,
+ .cio_advance = vvp_io_advance,
+ },
[CIT_WRITE] = {
.cio_fini = vvp_io_fini,
.cio_iter_init = vvp_io_write_iter_init,
.cio_iter_fini = vvp_io_write_iter_fini,
.cio_lock = vvp_io_write_lock,
.cio_start = vvp_io_write_start,
- .cio_advance = ccc_io_advance
+ .cio_end = vvp_io_rw_end,
+ .cio_advance = vvp_io_advance,
},
[CIT_SETATTR] = {
.cio_fini = vvp_io_setattr_fini,
.cio_iter_init = vvp_io_fault_iter_init,
.cio_lock = vvp_io_fault_lock,
.cio_start = vvp_io_fault_start,
- .cio_end = ccc_io_end
+ .cio_end = vvp_io_fault_end,
},
[CIT_FSYNC] = {
- .cio_start = vvp_io_fsync_start,
- .cio_fini = vvp_io_fini
+ .cio_start = vvp_io_fsync_start,
+ .cio_fini = vvp_io_fini
+ },
+ [CIT_GLIMPSE] = {
+ .cio_fini = vvp_io_fini
},
- [CIT_MISC] = {
- .cio_fini = vvp_io_fini
- }
- },
- .cio_read_page = vvp_io_read_page,
+ [CIT_MISC] = {
+ .cio_fini = vvp_io_fini
+ },
+ [CIT_LADVISE] = {
+ .cio_fini = vvp_io_fini
+ },
+ },
+ .cio_read_ahead = vvp_io_read_ahead
};
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io)
{
struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(obj);
- int result;
+ struct inode *inode = vvp_object_inode(obj);
+ int result;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- ENTRY;
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+ ENTRY;
CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
- "restore needed %d\n",
+ "restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
- CL_IO_SLICE_CLEAN(cio, cui_cl);
- cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
- vio->cui_ra_window_set = 0;
+ CL_IO_SLICE_CLEAN(vio, vui_cl);
+ cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
+ vio->vui_ra_valid = false;
result = 0;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
size_t count;
struct ll_inode_info *lli = ll_i2info(inode);
- count = io->u.ci_rw.crw_count;
- /* "If nbyte is 0, read() will return 0 and have no other
- * results." -- Single Unix Spec */
- if (count == 0)
- result = 1;
- else {
- cio->cui_tot_count = count;
- cio->cui_tot_nrsegs = 0;
- }
+ count = io->u.ci_rw.crw_count;
+ /* "If nbyte is 0, read() will return 0 and have no other
+ * results." -- Single Unix Spec */
+ if (count == 0)
+ result = 1;
+ else
+ vio->vui_tot_count = count;
/* for read/write, we store the jobid in the inode, and
* it'll be fetched by osc when building RPC.
* it's not accurate if the file is shared by different
* jobs.
*/
- lustre_get_jobid(lli->lli_jobid);
+ lustre_get_jobid(lli->lli_jobid, sizeof(lli->lli_jobid));
} else if (io->ci_type == CIT_SETATTR) {
if (!cl_io_is_trunc(io))
io->ci_lockreq = CILR_MANDATORY;
}
- /* ignore layout change for generic CIT_MISC but not for glimpse.
- * io context for glimpse must set ci_verify_layout to true,
- * see cl_glimpse_size0() for details. */
- if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
- io->ci_ignore_layout = 1;
-
/* Enqueue layout lock and get layout version. We need to do this
* even for operations requiring to open file, such as read and write,
* because it might not grant layout lock in IT_OPEN. */
if (result == 0 && !io->ci_ignore_layout) {
- result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ result = ll_layout_refresh(inode, &vio->vui_layout_gen);
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout
result = 0;
if (result < 0)
CERROR("%s: refresh file layout " DFID " error %d.\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(lu_object_fid(&obj->co_lu)), result);
+ ll_i2sbi(inode)->ll_fsname,
+ PFID(lu_object_fid(&obj->co_lu)), result);
}
+ io->ci_result = result < 0 ? result : 0;
RETURN(result);
}
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- /* Caling just for assertion */
- cl2ccc_io(env, slice);
- return vvp_env_io(env);
-}