#include "llite_internal.h"
#include "vvp_internal.h"
+static struct vvp_io *cl2vvp_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct vvp_io *vio;
+
+ vio = container_of(slice, struct vvp_io, vui_cl);
+ LASSERT(vio == vvp_env_io(env));
+
+ return vio;
+}
+
/**
 * True if \a io is a normal io, false for splice_{read,write}.
*/
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
- struct vvp_io *vio = vvp_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- return vio->cui_io_subtype == IO_NORMAL;
+ return vio->vui_io_subtype == IO_NORMAL;
}
/**
struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct vvp_io *cio = vvp_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
bool rc = true;
switch (io->ci_type) {
case CIT_WRITE:
		/* no lock is needed here to check lli_layout_gen, since we
		 * hold the extent lock and the GROUP lock has to be held to
		 * swap the layout */
- if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
+ if (ll_layout_version_get(lli) != vio->vui_layout_gen) {
io->ci_need_restart = 1;
/* this will return application a short read/write */
io->ci_continue = 0;
return rc;
}
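+/* Take ll_inode_size_lock() and the cl_object attribute lock together,
+ * in this fixed order, so that the inode size and the cl_object
+ * attributes are seen consistently. */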
+static void vvp_object_size_lock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ ll_inode_size_lock(inode);
+ cl_object_attr_lock(obj);
+}
+
+static void vvp_object_size_unlock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ cl_object_attr_unlock(obj);
+ ll_inode_size_unlock(inode);
+}
+
+/**
+ * Helper function that adjusts the file size (inode->i_size), if necessary,
+ * when the byte range starting at offset \a start and spanning \a count
+ * bytes is accessed. The file size can be arbitrarily stale on a Lustre
+ * client, but the client always knows the KMS (known minimum size). If the
+ * accessed range is inside [0, KMS], set the file size to KMS; otherwise
+ * glimpse the file size from the servers.
+ *
+ * Locking: ll_inode_size_lock() is used to serialize changes to the inode
+ * size and to protect consistency between the inode size and cl_object
+ * attributes. vvp_object_size_lock() protects consistency between the
+ * cl_attr's of the top-object and sub-objects.
+ */
+static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, loff_t start, size_t count,
+ int *exceed)
+{
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct inode *inode = vvp_object_inode(obj);
+ loff_t pos = start + count - 1;
+ loff_t kms;
+ int result;
+
+ /*
+ * Consistency guarantees: following possibilities exist for the
+ * relation between region being accessed and real file size at this
+ * moment:
+ *
+ * (A): the region is completely inside of the file;
+ *
+ * (B-x): x bytes of region are inside of the file, the rest is
+ * outside;
+ *
+ * (C): the region is completely outside of the file.
+ *
+ * This classification is stable under the DLM lock already acquired
+ * by the caller, because to change the class another client would
+ * have to take a DLM lock conflicting with ours. Also, any updates to
+ * ->i_size by other threads on this client are serialized by
+ * ll_inode_size_lock(). This guarantees that short reads are handled
+ * correctly in the face of concurrent writes and truncates.
+ */
+ vvp_object_size_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ kms = attr->cat_kms;
+ if (pos > kms) {
+ /*
+ * A glimpse is necessary to determine whether we
+ * return a short read (B) or some zeroes at the end
+ * of the buffer (C)
+ */
+ vvp_object_size_unlock(obj);
+ result = cl_glimpse_lock(env, io, inode, obj, 0);
+ if (result == 0 && exceed != NULL) {
+ /* If the accessed page index exceeds the
+ * end-of-file page index, return directly;
+ * do not expect the kernel to check this
+ * case correctly (linux-2.6.18-128.1.1
+ * fails to). --bug 17336 */
+ loff_t size = i_size_read(inode);
+ unsigned long cur_index = start >>
+ PAGE_CACHE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ (((size - 1) >> PAGE_CACHE_SHIFT) <
+ cur_index))
+ *exceed = 1;
+ }
+
+ return result;
+ } else {
+ /*
+ * region is within kms and, hence, within real file
+ * size (A). We need to increase i_size to cover the
+ * read region so that generic_file_read() will do its
+ * job, but that doesn't mean the kms size is
+ * _correct_, it is only the _minimum_ size. If
+ * someone does a stat they will get the correct size
+ * which will always be >= the kms value here.
+ * b=11081
+ */
+ if (i_size_read(inode) < kms) {
+ i_size_write(inode, kms);
+ CDEBUG(D_VFSTRACE,
+ DFID" updating i_size "LPU64"\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (__u64)i_size_read(inode));
+ }
+ }
+ }
+
+ vvp_object_size_unlock(obj);
+
+ return result;
+}
+
/*****************************************************************************
*
* io operations.
*
*/
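+/**
+ * Enqueue a single DLM lock for the page range [\a start, \a end] of
+ * the io's object. If the file was opened with a Lustre GROUP lock,
+ * the request is promoted to CLM_GROUP with the group id taken from
+ * the file descriptor.
+ */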
+static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ pgoff_t start, pgoff_t end)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
+ struct cl_object *obj = io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+ ENTRY;
+
+ CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
+
+ memset(&vio->vui_link, 0, sizeof vio->vui_link);
+
+ if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ descr->cld_mode = CLM_GROUP;
+ descr->cld_gid = vio->vui_fd->fd_grouplock.cg_gid;
+ } else {
+ descr->cld_mode = mode;
+ }
+
+ descr->cld_obj = obj;
+ descr->cld_start = start;
+ descr->cld_end = end;
+ descr->cld_enq_flags = enqflags;
+
+ cl_io_lock_add(env, io, &vio->vui_link);
+
+ RETURN(0);
+}
+
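+/* Byte-offset wrapper around vvp_io_one_lock_index(): converts the
+ * [start, end] byte range to page indices. */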
+static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ loff_t start, loff_t end)
+{
+ struct cl_object *obj = io->ci_obj;
+
+ return vvp_io_one_lock_index(env, io, enqflags, mode,
+ cl_index(obj, start), cl_index(obj, end));
+}
+
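+/* Initialize the per-iteration queue in which written pages are
+ * accumulated until vvp_io_write_commit() submits them. */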
static int vvp_io_write_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *cio = cl2vvp_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
- cl_page_list_init(&cio->u.write.cui_queue);
- cio->u.write.cui_written = 0;
- cio->u.write.cui_from = 0;
- cio->u.write.cui_to = PAGE_SIZE;
+ cl_page_list_init(&vio->u.write.vui_queue);
+ vio->u.write.vui_written = 0;
+ vio->u.write.vui_from = 0;
+ vio->u.write.vui_to = PAGE_SIZE;
return 0;
}
static void vvp_io_write_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *cio = cl2vvp_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
- LASSERT(cio->u.write.cui_queue.pl_nr == 0);
+ LASSERT(vio->u.write.vui_queue.pl_nr == 0);
}
static int vvp_io_fault_iter_init(const struct lu_env *env,
struct vvp_io *vio = cl2vvp_io(env, ios);
struct inode *inode = vvp_object_inode(ios->cis_obj);
- LASSERT(inode == vio->cui_fd->fd_file->f_dentry->d_inode);
+ LASSERT(inode == vio->vui_fd->fd_file->f_dentry->d_inode);
vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
return 0;
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct vvp_io *cio = cl2vvp_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
struct inode *inode = vvp_object_inode(obj);
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
"restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
if (io->ci_restore_needed == 1) {
int rc;
/* check layout version */
ll_layout_refresh(inode, &gen);
- io->ci_need_restart = cio->cui_layout_gen != gen;
+ io->ci_need_restart = vio->vui_layout_gen != gen;
if (io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
DFID" layout changed from %d to %d.\n",
PFID(lu_object_fid(&obj->co_lu)),
- cio->cui_layout_gen, gen);
+ vio->vui_layout_gen, gen);
		/* today, a successful restore is the only
		 * possible case */
/* restore was done, clear restoring state */
if (!cl_is_normalio(env, io))
RETURN(0);
- if (vio->cui_iov == NULL) /* nfs or loop back device write */
- RETURN(0);
+ if (vio->vui_iov == NULL) /* nfs or loop back device write */
+ RETURN(0);
- /* No MM (e.g. NFS)? No vmas too. */
- if (mm == NULL)
- RETURN(0);
+ /* No MM (e.g. NFS)? No vmas too. */
+ if (mm == NULL)
+ RETURN(0);
- for (seg = 0; seg < vio->cui_nrsegs; seg++) {
- const struct iovec *iv = &vio->cui_iov[seg];
+ for (seg = 0; seg < vio->vui_nrsegs; seg++) {
+ const struct iovec *iv = &vio->vui_iov[seg];
addr = (unsigned long)iv->iov_base;
count = iv->iov_len;
RETURN(result);
}
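+/**
+ * Advance the iovec cursor after an iteration consumed \a nob bytes:
+ * skip the segments used by the last iteration and, if
+ * vvp_io_update_iov() shortened the last of them (vui_iov_olen != 0),
+ * step back to it and either continue from its unconsumed tail
+ * (io->ci_continue) or restore its original length for a restarted io.
+ */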
+static void vvp_io_advance(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ size_t nob)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = ios->cis_io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ if (!cl_is_normalio(env, io))
+ return;
+
+ LASSERT(vio->vui_tot_nrsegs >= vio->vui_nrsegs);
+ LASSERT(vio->vui_tot_count >= nob);
+
+ vio->vui_iov += vio->vui_nrsegs;
+ vio->vui_tot_nrsegs -= vio->vui_nrsegs;
+ vio->vui_tot_count -= nob;
+
+ /* update the iov */
+ if (vio->vui_iov_olen > 0) {
+ struct iovec *iv;
+
+ vio->vui_iov--;
+ vio->vui_tot_nrsegs++;
+ iv = &vio->vui_iov[0];
+ if (io->ci_continue) {
+ iv->iov_base += iv->iov_len;
+ LASSERT(vio->vui_iov_olen > iv->iov_len);
+ iv->iov_len = vio->vui_iov_olen - iv->iov_len;
+ } else {
+ /* restore the iov_len, in case of restart io. */
+ iv->iov_len = vio->vui_iov_olen;
+ }
+ vio->vui_iov_olen = 0;
+ }
+}
+
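+/**
+ * Trim the iovec so that it covers exactly crw_count bytes for the
+ * current iteration. If the cut falls inside a segment, the segment's
+ * original length is saved in vui_iov_olen so vvp_io_advance() can
+ * restore it. For example, with two 4096-byte segments and crw_count
+ * == 6144, the second segment is shortened to 2048, vui_iov_olen is
+ * set to 4096 and vui_nrsegs ends up as 2.
+ */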
+static void vvp_io_update_iov(const struct lu_env *env,
+ struct vvp_io *vio, struct cl_io *io)
+{
+ int i;
+ size_t size = io->u.ci_rw.crw_count;
+
+ vio->vui_iov_olen = 0;
+ if (!cl_is_normalio(env, io) || vio->vui_tot_nrsegs == 0)
+ return;
+
+ for (i = 0; i < vio->vui_tot_nrsegs; i++) {
+ struct iovec *iv = &vio->vui_iov[i];
+
+ if (iv->iov_len < size) {
+ size -= iv->iov_len;
+ } else {
+ if (iv->iov_len > size) {
+ vio->vui_iov_olen = iv->iov_len;
+ iv->iov_len = size;
+ }
+ break;
+ }
+ }
+
+ vio->vui_nrsegs = i + 1;
+ LASSERTF(vio->vui_tot_nrsegs >= vio->vui_nrsegs,
+ "tot_nrsegs: %lu, nrsegs: %lu\n",
+ vio->vui_tot_nrsegs, vio->vui_nrsegs);
+}
+
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
enum cl_lock_mode mode, loff_t start, loff_t end)
{
- struct vvp_io *cio = vvp_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
int result;
int ast_flags = 0;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
ENTRY;
- vvp_io_update_iov(env, cio, io);
+ vvp_io_update_iov(env, vio, io);
if (io->u.ci_rw.crw_nonblock)
ast_flags |= CEF_NONBLOCK;
- result = vvp_mmap_locks(env, cio, io);
+ result = vvp_mmap_locks(env, vio, io);
if (result == 0)
result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
static int vvp_io_setattr_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *cio = vvp_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
struct cl_io *io = ios->cis_io;
__u64 new_size;
__u32 enqflags = 0;
return 0;
new_size = 0;
}
- cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
+
+ vio->u.setattr.vui_local_lock = SETATTR_EXTENT_LOCK;
return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
new_size, OBD_OBJECT_EOF);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
- struct file *file = vio->cui_fd->fd_file;
+ struct file *file = vio->vui_fd->fd_file;
int result;
loff_t pos = io->u.ci_rd.rd.crw_pos;
long cnt = io->u.ci_rd.rd.crw_count;
- long tot = vio->cui_tot_count;
+ long tot = vio->vui_tot_count;
int exceed = 0;
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
if (!can_populate_pages(env, io, inode))
return 0;
- result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
+ result = vvp_prep_size(env, obj, io, pos, tot, &exceed);
if (result != 0)
return result;
else if (exceed != 0)
inode->i_ino, cnt, pos, i_size_read(inode));
/* turn off the kernel's read-ahead */
- vio->cui_fd->fd_file->f_ra.ra_pages = 0;
+ vio->vui_fd->fd_file->f_ra.ra_pages = 0;
/* initialize read-ahead window once per syscall */
- if (!vio->cui_ra_valid) {
- vio->cui_ra_valid = true;
- vio->cui_ra_start = cl_index(obj, pos);
- vio->cui_ra_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+ if (!vio->vui_ra_valid) {
+ vio->vui_ra_valid = true;
+ vio->vui_ra_start = cl_index(obj, pos);
+ vio->vui_ra_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
ll_ras_enter(file);
}
- /* BUG: 5972 */
- file_accessed(file);
- switch (vio->cui_io_subtype) {
- case IO_NORMAL:
- LASSERT(vio->cui_iocb->ki_pos == pos);
- result = generic_file_aio_read(vio->cui_iocb,
- vio->cui_iov, vio->cui_nrsegs,
- vio->cui_iocb->ki_pos);
+ /* BUG: 5972 */
+ file_accessed(file);
+ switch (vio->vui_io_subtype) {
+ case IO_NORMAL:
+ LASSERT(vio->vui_iocb->ki_pos == pos);
+ result = generic_file_aio_read(vio->vui_iocb,
+ vio->vui_iov, vio->vui_nrsegs,
+ vio->vui_iocb->ki_pos);
break;
case IO_SPLICE:
result = generic_file_splice_read(file, &pos,
- vio->u.splice.cui_pipe, cnt,
- vio->u.splice.cui_flags);
+ vio->u.splice.vui_pipe, cnt,
+ vio->u.splice.vui_flags);
		/* LU-1109: do splice read stripe by stripe, otherwise it
		 * may make nfsd stuck if this read occupies all internal
		 * pipe buffers. */
io->ci_continue = 0;
break;
default:
- CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
+ CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
LBUG();
}
if (result < cnt)
io->ci_continue = 0;
io->ci_nob += result;
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid, vio->cui_fd,
+ ll_rw_stats_tally(ll_i2sbi(inode), current->pid, vio->vui_fd,
pos, result, READ);
result = 0;
}
{
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
- struct vvp_io *cio = vvp_env_io(env);
- struct cl_page_list *queue = &cio->u.write.cui_queue;
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *queue = &vio->u.write.vui_queue;
struct cl_page *page;
int rc = 0;
int bytes = 0;
- unsigned int npages = cio->u.write.cui_queue.pl_nr;
+ unsigned int npages = vio->u.write.vui_queue.pl_nr;
ENTRY;
if (npages == 0)
RETURN(0);
CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
- npages, cio->u.write.cui_from, cio->u.write.cui_to);
+ npages, vio->u.write.vui_from, vio->u.write.vui_to);
LASSERT(page_list_sanity_check(obj, queue));
/* submit IO with async write */
rc = cl_io_commit_async(env, io, queue,
- cio->u.write.cui_from, cio->u.write.cui_to,
+ vio->u.write.vui_from, vio->u.write.vui_to,
write_commit_callback);
npages -= queue->pl_nr; /* already committed pages */
if (npages > 0) {
bytes = npages << PAGE_SHIFT;
/* first page */
- bytes -= cio->u.write.cui_from;
+ bytes -= vio->u.write.vui_from;
if (queue->pl_nr == 0) /* last page */
- bytes -= PAGE_SIZE - cio->u.write.cui_to;
+ bytes -= PAGE_SIZE - vio->u.write.vui_to;
LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
- cio->u.write.cui_written += bytes;
+ vio->u.write.vui_written += bytes;
CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
- npages, bytes, cio->u.write.cui_written);
+ npages, bytes, vio->u.write.vui_written);
/* the first page must have been written. */
- cio->u.write.cui_from = 0;
+ vio->u.write.vui_from = 0;
}
LASSERT(page_list_sanity_check(obj, queue));
LASSERT(ergo(rc == 0, queue->pl_nr == 0));
/* out of quota, try sync write */
if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
rc = vvp_io_commit_sync(env, io, queue,
- cio->u.write.cui_from,
- cio->u.write.cui_to);
+ vio->u.write.vui_from,
+ vio->u.write.vui_to);
if (rc > 0) {
- cio->u.write.cui_written += rc;
+ vio->u.write.vui_written += rc;
rc = 0;
}
}
static int vvp_io_write_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *cio = cl2vvp_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = vvp_object_inode(obj);
if (!can_populate_pages(env, io, inode))
RETURN(0);
- if (cl_io_is_append(io)) {
- /*
- * PARALLEL IO This has to be changed for parallel IO doing
- * out-of-order writes.
- */
+ if (cl_io_is_append(io)) {
+ /*
+ * PARALLEL IO This has to be changed for parallel IO doing
+ * out-of-order writes.
+ */
ll_merge_attr(env, inode);
- pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
- cio->cui_iocb->ki_pos = pos;
- } else {
- LASSERT(cio->cui_iocb->ki_pos == pos);
+ pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+ vio->vui_iocb->ki_pos = pos;
+ } else {
+ LASSERT(vio->vui_iocb->ki_pos == pos);
}
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (cio->cui_iov == NULL) {
+ if (vio->vui_iov == NULL) {
/* from a temp io in ll_cl_init(). */
result = 0;
} else {
* consistency, proper locking to protect against writes,
	 * truncates, etc. is handled in the higher layers of lustre.
*/
- result = __generic_file_aio_write(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- &cio->cui_iocb->ki_pos);
+ result = __generic_file_aio_write(vio->vui_iocb,
+ vio->vui_iov, vio->vui_nrsegs,
+ &vio->vui_iocb->ki_pos);
if (result > 0 || result == -EIOCBQUEUED) {
ssize_t err;
- err = generic_write_sync(cio->cui_iocb->ki_filp,
+ err = generic_write_sync(vio->vui_iocb->ki_filp,
pos, result);
if (err < 0 && result > 0)
result = err;
}
if (result > 0) {
result = vvp_io_write_commit(env, io);
- if (cio->u.write.cui_written > 0) {
- result = cio->u.write.cui_written;
+ if (vio->u.write.vui_written > 0) {
+ result = vio->u.write.vui_written;
io->ci_nob += result;
CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
if (result < cnt)
io->ci_continue = 0;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, WRITE);
+ vio->vui_fd, pos, result, WRITE);
result = 0;
}
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
+ result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
if (result != 0)
RETURN(result);
struct cl_page *page = slice->cpl_page;
struct inode *inode = vvp_object_inode(slice->cpl_obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2vvp_io(env, ios)->cui_fd;
+ struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd;
struct ll_readahead_state *ras = &fd->fd_ras;
struct cl_2queue *queue = &io->ci_queue;
RETURN(0);
}
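+/* Common end-of-io hook for all io types: re-check the object
+ * invariant. */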
+static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ CLOBINVRNT(env, ios->cis_io->ci_obj,
+ vvp_object_invariant(ios->cis_io->ci_obj));
+}
+
static const struct cl_io_operations vvp_io_ops = {
.op = {
[CIT_READ] = {
"restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- vio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
- CL_IO_SLICE_CLEAN(vio, cui_cl);
- cl_io_slice_add(io, &vio->cui_cl, obj, &vvp_io_ops);
- vio->cui_ra_valid = false;
+ CL_IO_SLICE_CLEAN(vio, vui_cl);
+ cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
+ vio->vui_ra_valid = false;
result = 0;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
size_t count;
if (count == 0)
result = 1;
else {
- vio->cui_tot_count = count;
- vio->cui_tot_nrsegs = 0;
+ vio->vui_tot_count = count;
+ vio->vui_tot_nrsegs = 0;
}
/* for read/write, we store the jobid in the inode, and
* even for operations requiring to open file, such as read and write,
* because it might not grant layout lock in IT_OPEN. */
if (result == 0 && !io->ci_ignore_layout) {
- result = ll_layout_refresh(inode, &vio->cui_layout_gen);
+ result = ll_layout_refresh(inode, &vio->vui_layout_gen);
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout