* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
return vio->cui_io_subtype == IO_NORMAL;
}
+/**
+ * For swapping layout. The file's layout may have changed.
+ * To avoid populating pages to a wrong stripe, we have to verify the
+ * correctness of layout. It works because swapping layout processes
+ * have to acquire group lock.
+ */
+static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
+ struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ccc_io *cio = ccc_env_io(env);
+ bool rc = true;
+
+ switch (io->ci_type) {
+ case CIT_READ:
+ case CIT_WRITE:
+ /* don't need lock here to check lli_layout_gen as we have held
+ * extent lock and the GROUP lock has to be held to swap the layout */
+ if (lli->lli_layout_gen != cio->cui_layout_gen) {
+ io->ci_need_restart = 1;
+ /* this will return a short read/write to the application */
+ io->ci_continue = 0;
+ rc = false;
+ }
+ case CIT_FAULT:
+ /* fault is okay because we've already had a page. */
+ default:
+ break;
+ }
+
+ return rc;
+}
+
/*****************************************************************************
*
* io operations.
static int vvp_io_setattr_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
-
- /*
- * We really need to get our PW lock before we change inode->i_size.
- * If we don't we can race with other i_size updaters on our node,
- * like ll_file_read. We can also race with i_size propogation to
- * other nodes through dirtying and writeback of final cached pages.
- * This last one is especially bad for racing o_append users on other
- * nodes.
- */
- if (cl_io_is_trunc(ios->cis_io))
- inode_dio_write_done(inode);
- mutex_unlock(&inode->i_mutex);
- cio->u.setattr.cui_locks_released = 1;
return 0;
}
static int vvp_io_setattr_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
struct cl_io *io = ios->cis_io;
struct inode *inode = ccc_object_inode(io->ci_obj);
- LASSERT(cio->u.setattr.cui_locks_released);
-
mutex_lock(&inode->i_mutex);
- cio->u.setattr.cui_locks_released = 0;
-
if (cl_io_is_trunc(io))
return vvp_io_setattr_trunc(env, ios, inode,
io->u.ci_setattr.sa_attr.lvb_size);
static void vvp_io_setattr_end(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(io->ci_obj);
- if (!cl_io_is_trunc(io))
- return;
-
- /* Truncate in memory pages - they must be clean pages because osc
- * has already notified to destroy osc_extents. */
- vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
+ if (cl_io_is_trunc(io)) {
+ /* Truncate in memory pages - they must be clean pages
+ * because osc has already notified to destroy osc_extents. */
+ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
+ inode_dio_write_done(inode);
+ }
+ mutex_unlock(&inode->i_mutex);
}
static void vvp_io_setattr_fini(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(ios->cis_io->ci_obj);
-
- if (cio->u.setattr.cui_locks_released) {
- mutex_lock(&inode->i_mutex);
- if (cl_io_is_trunc(io))
- inode_dio_wait(inode);
- cio->u.setattr.cui_locks_released = 0;
- }
vvp_io_fini(env, ios);
}
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
+ if (!can_populate_pages(env, io, inode))
+ return 0;
+
result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
if (result != 0)
return result;
ENTRY;
+ if (!can_populate_pages(env, io, inode))
+ return 0;
+
if (cl_io_is_append(io)) {
/*
* PARALLEL IO This has to be changed for parallel IO doing
struct cl_page *pg = slice->cpl_page;
struct inode *inode = ccc_object_inode(obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
cfs_page_t *vmpage = cp->cpg_page;
int result;
}
ll_stats_ops_tally(sbi, tallyop, 1);
+ /* Inode should be marked DIRTY even if no new page was marked DIRTY
+ * because page could have been not flushed between 2 modifications.
+ * It is important the file is marked DIRTY as soon as the I/O is done.
+ * Indeed, when cache is flushed, file could be already closed and it
+ * is too late to warn the MDT.
+ * It is acceptable that file is marked DIRTY even if I/O is dropped
+ * for some reason before being flushed to OST.
+ */
+ if (result == 0) {
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
+
size = cl_offset(obj, pg->cp_index) + to;
ll_inode_size_lock(inode);
/* Enqueue layout lock and get layout version. We need to do this
* even for operations requiring to open file, such as read and write,
* because it might not grant layout lock in IT_OPEN. */
- if (result == 0 && !io->ci_ignore_layout)
+ if (result == 0 && !io->ci_ignore_layout) {
result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ if (result == -ENOENT)
+ /* If the inode on MDS has been removed, but the objects
+ * on OSTs haven't been destroyed (async unlink), layout
+ * fetch will return -ENOENT, we'd ignore this error
+ * and continue with dirty flush. LU-3230. */
+ result = 0;
+ if (result < 0)
+ CERROR("%s: refresh file layout " DFID " error %d.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(lu_object_fid(&obj->co_lu)), result);
+ }
RETURN(result);
}