summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
8d334f6)
Don't return the error again on close if the application has
already seen the earlier write failure.
Rename the per-inode 'lli_write_rc' to the per-file-structure
'fd_write_failed' to track the last write/fsync failure.
When 'sys_close()' is called against the 'file' structure,
we know whether the caller has already seen the earlier write/fsync
failure or not, so we can avoid potentially redundant error handling,
such as a confusing error message.
Also fix a missed assignment of 'lli_async_rc' when ll_writepage() failed.
Signed-off-by: Fan Yong <yong.fan@whamcloud.com>
Change-Id: I62d9cd83fc03fad22c994f2a77774ca113a6c057
Reviewed-on: http://review.whamcloud.com/1497
Tested-by: Hudson
Reviewed-by: Niu Yawei <niu@whamcloud.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@whamcloud.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
struct ll_file_data *ll_file_data_get(void)
{
struct ll_file_data *ll_file_data_get(void)
{
- struct ll_file_data *fd;
+ struct ll_file_data *fd;
- OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO);
- return fd;
+ OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO);
+ fd->fd_write_failed = false;
+ return fd;
}
static void ll_file_data_put(struct ll_file_data *fd)
}
static void ll_file_data_put(struct ll_file_data *fd)
-static ssize_t ll_file_io_generic(const struct lu_env *env,
- struct vvp_io_args *args, struct file *file,
- enum cl_io_type iot, loff_t *ppos, size_t count)
+static ssize_t
+ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
+ struct file *file, enum cl_io_type iot,
+ loff_t *ppos, size_t count)
- struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct cl_io *io;
ssize_t result;
ENTRY;
struct cl_io *io;
ssize_t result;
ENTRY;
if (result >= 0) {
ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode),
LPROC_LL_WRITE_BYTES, result);
if (result >= 0) {
ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode),
LPROC_LL_WRITE_BYTES, result);
- lli->lli_write_rc = 0;
- } else {
- lli->lli_write_rc = result;
- }
- }
+ fd->fd_write_failed = false;
+ } else {
+ fd->fd_write_failed = true;
+ }
+ }
int ll_flush(struct file *file, fl_owner_t id)
{
int ll_flush(struct file *file, fl_owner_t id)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc, err;
-
- LASSERT(!S_ISDIR(inode->i_mode));
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ int rc, err;
- /* the application should know write failure already. */
- if (lli->lli_write_rc)
- return 0;
+ LASSERT(!S_ISDIR(inode->i_mode));
- /* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping. */
- rc = lli->lli_async_rc;
- lli->lli_async_rc = 0;
+ /* catch async errors that were recorded back when async writeback
+ * failed for pages in this mapping. */
+ rc = lli->lli_async_rc;
+ lli->lli_async_rc = 0;
err = lov_read_and_clear_async_rc(lli->lli_clob);
if (rc == 0)
rc = err;
err = lov_read_and_clear_async_rc(lli->lli_clob);
if (rc == 0)
rc = err;
+ /* The application has been told write failure already.
+ * Do not report failure again. */
+ if (fd->fd_write_failed)
+ return 0;
lsm = ccc_inode_lsm_get(inode);
if (data && lsm) {
lsm = ccc_inode_lsm_get(inode);
if (data && lsm) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
err = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
CL_FSYNC_ALL);
if (rc == 0 && err < 0)
rc = err;
err = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
CL_FSYNC_ALL);
if (rc == 0 && err < 0)
rc = err;
- lli->lli_write_rc = rc < 0 ? rc : 0;
+ if (rc < 0)
+ fd->fd_write_failed = true;
+ else
+ fd->fd_write_failed = false;
}
ccc_inode_lsm_put(inode, lsm);
}
ccc_inode_lsm_put(inode, lsm);
cfs_rw_semaphore_t f_trunc_sem;
cfs_mutex_t f_write_mutex;
cfs_rw_semaphore_t f_trunc_sem;
cfs_mutex_t f_write_mutex;
+ cfs_rw_semaphore_t f_glimpse_sem;
+ cfs_time_t f_glimpse_time;
+ cfs_list_t f_agl_list;
+ __u64 f_agl_index;
+
/* for writepage() only to communicate to fsync */
/* for writepage() only to communicate to fsync */
- int f_async_rc;
- int f_write_rc;
- cfs_rw_semaphore_t f_glimpse_sem;
- cfs_time_t f_glimpse_time;
- cfs_list_t f_agl_list;
- __u64 f_agl_index;
/*
* whenever a process try to read/write the file, the
* jobid of the process will be saved here, and it'll
/*
* whenever a process try to read/write the file, the
* jobid of the process will be saved here, and it'll
#define lli_maxbytes u.f.f_maxbytes
#define lli_trunc_sem u.f.f_trunc_sem
#define lli_write_mutex u.f.f_write_mutex
#define lli_maxbytes u.f.f_maxbytes
#define lli_trunc_sem u.f.f_trunc_sem
#define lli_write_mutex u.f.f_write_mutex
-#define lli_async_rc u.f.f_async_rc
-#define lli_write_rc u.f.f_write_rc
-#define lli_glimpse_sem u.f.f_glimpse_sem
-#define lli_glimpse_time u.f.f_glimpse_time
-#define lli_agl_list u.f.f_agl_list
-#define lli_agl_index u.f.f_agl_index
-#define lli_jobid u.f.f_jobid
+#define lli_glimpse_sem u.f.f_glimpse_sem
+#define lli_glimpse_time u.f.f_glimpse_time
+#define lli_agl_list u.f.f_agl_list
+#define lli_agl_index u.f.f_agl_index
+#define lli_async_rc u.f.f_async_rc
+#define lli_jobid u.f.f_jobid
/* XXX: For following frequent used members, although they maybe special
* used for non-directory object, it is some time-wasting to check
/* XXX: For following frequent used members, although they maybe special
* used for non-directory object, it is some time-wasting to check
struct ll_file_dir fd_dir;
__u32 fd_flags;
struct file *fd_file;
struct ll_file_dir fd_dir;
__u32 fd_flags;
struct file *fd_file;
+ /* Indicate whether need to report failure when close.
+ * true: failure is known, not report again.
+ * false: unknown failure, should report. */
+ bool fd_write_failed;
lli->lli_symlink_name = NULL;
cfs_init_rwsem(&lli->lli_trunc_sem);
cfs_mutex_init(&lli->lli_write_mutex);
lli->lli_symlink_name = NULL;
cfs_init_rwsem(&lli->lli_trunc_sem);
cfs_mutex_init(&lli->lli_write_mutex);
- lli->lli_async_rc = 0;
- lli->lli_write_rc = 0;
- cfs_init_rwsem(&lli->lli_glimpse_sem);
- lli->lli_glimpse_time = 0;
- CFS_INIT_LIST_HEAD(&lli->lli_agl_list);
- lli->lli_agl_index = 0;
- }
+ cfs_init_rwsem(&lli->lli_glimpse_sem);
+ lli->lli_glimpse_time = 0;
+ CFS_INIT_LIST_HEAD(&lli->lli_agl_list);
+ lli->lli_agl_index = 0;
+ lli->lli_async_rc = 0;
+ }
cfs_mutex_init(&lli->lli_layout_mutex);
}
cfs_mutex_init(&lli->lli_layout_mutex);
}
int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
{
int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
{
- struct inode *inode = vmpage->mapping->host;
+ struct inode *inode = vmpage->mapping->host;
+ struct ll_inode_info *lli = ll_i2info(inode);
struct lu_env *env;
struct cl_io *io;
struct cl_page *page;
struct cl_object *clob;
struct cl_env_nest nest;
struct lu_env *env;
struct cl_io *io;
struct cl_page *page;
struct cl_object *clob;
struct cl_env_nest nest;
+ bool redirtied = false;
+ bool unlocked = false;
int result;
ENTRY;
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
int result;
ENTRY;
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
- if (ll_i2dtexp(inode) == NULL)
- RETURN(-EINVAL);
+ LASSERT(ll_i2dtexp(inode) != NULL);
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ GOTO(out, result = PTR_ERR(env));
clob = ll_i2info(inode)->lli_clob;
LASSERT(clob != NULL);
clob = ll_i2info(inode)->lli_clob;
LASSERT(clob != NULL);
if (!PageError(vmpage)) {
redirty_page_for_writepage(wbc, vmpage);
result = 0;
if (!PageError(vmpage)) {
redirty_page_for_writepage(wbc, vmpage);
result = 0;
}
}
cl_page_disown(env, io, page);
}
}
cl_page_disown(env, io, page);
lu_ref_del(&page->cp_reference,
"writepage", cfs_current());
cl_page_put(env, page);
lu_ref_del(&page->cp_reference,
"writepage", cfs_current());
cl_page_put(env, page);
+ } else {
+ result = PTR_ERR(page);
+ }
}
cl_env_nested_put(&nest, env);
}
cl_env_nested_put(&nest, env);
+ GOTO(out, result);
+
+out:
+ if (result < 0) {
+ if (!lli->lli_async_rc)
+ lli->lli_async_rc = result;
+ SetPageError(vmpage);
+ if (!unlocked)
+ unlock_page(vmpage);
+ }
+ return result;
}
int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
}
int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)