cfs_time_current||jiffies
cfs_time_current_64||ktime_get
cfs_time_current_sec||ktime_get_real_seconds
+ci_nob||ci_bytes
CLASSERT||BUILD_BUG_ON()
+crw_count||crw_bytes
msecs_to_jiffies||cfs_time_seconds
DEFINE_TIMER||CFS_DEFINE_TIMER
define OBD_CONNECT||see "XXX README XXX" below and contact adilger@whamcloud.com
ENOTSUPP||EOPNOTSUPP
ERR_PTR.PTR_ERR||ERR_CAST
from_timer||cfs_from_timer
+ft_nob||ft_bytes
f_dentry||f_path.dentry
[^_]get_seconds||ktime_get_real_seconds
kmem_cache_alloc.*GFP_ZERO||kmem_cache_zalloc
time_t||timeout_t
timer_setup||cfs_timer_setup
version_code.*2.1[7-9]||version 2.16.x should be used
+vui_tot_count||vui_tot_bytes
wait_queue_t||wait_queue_entry_t
struct cl_io_rw_common {
loff_t crw_pos;
- size_t crw_count;
+ size_t crw_bytes;
int crw_nonblock;
};
enum cl_setattr_subtype {
u32 dv_layout_version;
int dv_flags;
} ci_data_version;
- struct cl_fault_io {
- /** page index within file. */
- pgoff_t ft_index;
- /** bytes valid byte on a faulted page. */
- size_t ft_nob;
- /** writable page? for nopage() only */
- int ft_writable;
- /** page of an executable? */
- int ft_executable;
- /** page_mkwrite() */
- int ft_mkwrite;
- /** resulting page */
- struct cl_page *ft_page;
- } ci_fault;
+ struct cl_fault_io {
+ /** page index within file. */
+ pgoff_t ft_index;
+ /** valid bytes on a faulted page. */
+ size_t ft_bytes;
+ /** writable page? for nopage() only */
+ int ft_writable;
+ /** page of an executable? */
+ int ft_executable;
+ /** page_mkwrite() */
+ int ft_mkwrite;
+ /** resulting page */
+ struct cl_page *ft_page;
+ } ci_fault;
struct cl_fsync_io {
loff_t fi_start;
loff_t fi_end;
struct cl_misc_io {
time64_t lm_next_rpc_time;
} ci_misc;
- } u;
- struct cl_2queue ci_queue;
- size_t ci_nob;
- int ci_result;
- unsigned int ci_continue:1,
+ } u;
+ struct cl_2queue ci_queue;
+ size_t ci_bytes;
+ int ci_result;
+ unsigned int ci_continue:1,
/**
* This io has held grouplock, to inform sublayers that
* don't do lockless i/o.
/** \defgroup cl_io cl_io
* @{ */
-int cl_io_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_rw_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, loff_t pos, size_t count);
-int cl_io_loop (const struct lu_env *env, struct cl_io *io);
-
-void cl_io_fini (const struct lu_env *env, struct cl_io *io);
-int cl_io_iter_init (const struct lu_env *env, struct cl_io *io);
-void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io);
-int cl_io_lock (const struct lu_env *env, struct cl_io *io);
-void cl_io_unlock (const struct lu_env *env, struct cl_io *io);
-int cl_io_start (const struct lu_env *env, struct cl_io *io);
-void cl_io_end (const struct lu_env *env, struct cl_io *io);
-int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link);
+int cl_io_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj);
+int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj);
+int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, loff_t pos, size_t bytes);
+int cl_io_loop(const struct lu_env *env, struct cl_io *io);
+
+void cl_io_fini(const struct lu_env *env, struct cl_io *io);
+int cl_io_iter_init(const struct lu_env *env, struct cl_io *io);
+void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io);
+int cl_io_lock(const struct lu_env *env, struct cl_io *io);
+void cl_io_unlock(const struct lu_env *env, struct cl_io *io);
+int cl_io_start(const struct lu_env *env, struct cl_io *io);
+void cl_io_end(const struct lu_env *env, struct cl_io *io);
+int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_io_lock_link *link);
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
struct cl_lock_descr *descr);
int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
loff_t pos, size_t bytes);
int cl_io_read_ahead (const struct lu_env *env, struct cl_io *io,
pgoff_t start, struct cl_read_ahead *ra);
-void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
- size_t nob);
+void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io,
+ size_t bytes);
/**
* True, iff \a io is an O_APPEND write(2).
static ssize_t
ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
struct file *file, enum cl_io_type iot,
- loff_t *ppos, size_t count)
+ loff_t *ppos, size_t bytes)
{
struct vvp_io *vio = vvp_env_io(env);
struct inode *inode = file_inode(file);
bool is_aio = false;
bool is_parallel_dio = false;
struct cl_dio_aio *ci_dio_aio = NULL;
- size_t per_bytes;
- bool partial_io = false;
- size_t max_io_pages, max_cached_pages;
+ size_t per_bytes, max_io_bytes;
+ bool partial_io;
ENTRY;
- CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, count: %zu\n",
+ CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, bytes: %zu\n",
file_dentry(file)->d_name.name,
- iot == CIT_READ ? "read" : "write", *ppos, count);
+ iot == CIT_READ ? "read" : "write", *ppos, bytes);
- max_io_pages = PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT;
- max_cached_pages = sbi->ll_cache->ccc_lru_max;
- if (max_io_pages > (max_cached_pages >> 2))
- max_io_pages = max_cached_pages >> 2;
+ max_io_bytes = min_t(size_t, PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT,
+ sbi->ll_cache->ccc_lru_max >> 2) << PAGE_SHIFT;
io = vvp_env_thread_io(env);
if (file->f_flags & O_DIRECT) {
* if we have small max_cached_mb but large block IO issued, io
* could not be finished and blocked whole client.
*/
- if (file->f_flags & O_DIRECT)
- per_bytes = count;
- else
- per_bytes = min(max_io_pages << PAGE_SHIFT, count);
- partial_io = per_bytes < count;
+ if (file->f_flags & O_DIRECT || bytes < max_io_bytes) {
+ per_bytes = bytes;
+ partial_io = false;
+ } else {
+ per_bytes = max_io_bytes;
+ partial_io = true;
+ }
io = vvp_env_thread_io(env);
ll_io_init(io, file, iot, args);
io->ci_dio_aio = ci_dio_aio;
range_locked = false;
}
- if (io->ci_nob > 0) {
+ if (io->ci_bytes > 0) {
if (rc2 == 0) {
- result += io->ci_nob;
+ result += io->ci_bytes;
*ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
} else if (rc2) {
result = 0;
}
- count -= io->ci_nob;
+ bytes -= io->ci_bytes;
/* prepare IO restart */
- if (count > 0)
+ if (bytes > 0)
args->u.normal.via_iter = vio->vui_iter;
if (partial_io) {
* Reexpand iov count because it was zero
* after IO finish.
*/
- iov_iter_reexpand(vio->vui_iter, count);
- if (per_bytes == io->ci_nob)
+ iov_iter_reexpand(vio->vui_iter, bytes);
+ if (per_bytes == io->ci_bytes)
io->ci_need_restart = 1;
}
}
iot, rc, result, io->ci_need_restart);
if ((!rc || rc == -ENODATA || rc == -ENOLCK || rc == -EIOCBQUEUED) &&
- count > 0 && io->ci_need_restart && retries-- > 0) {
+ bytes > 0 && io->ci_need_restart && retries-- > 0) {
CDEBUG(D_VFSTRACE,
- "%s: restart %s from ppos=%lld count=%zu retries=%u ret=%zd: rc = %d\n",
+ "%s: restart %s from ppos=%lld bytes=%zu retries=%u ret=%zd: rc = %d\n",
file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write",
- *ppos, count, retries, result, rc);
+ *ppos, bytes, retries, result, rc);
/* preserve the tried count for FLR */
retried = io->ci_ndelay_tried;
dio_lock = io->ci_dio_lock;
return test_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
}
-void ll_ras_enter(struct file *f, loff_t pos, size_t count);
+void ll_ras_enter(struct file *f, loff_t pos, size_t bytes);
/* llite/lcommon_misc.c */
int cl_ocd_update(struct obd_device *host, struct obd_device *watched,
static const struct vm_operations_struct ll_file_vm_ops;
void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
- unsigned long addr, size_t count)
+ unsigned long addr, size_t bytes)
{
policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
(vma->vm_pgoff << PAGE_SHIFT);
- policy->l_extent.end = (policy->l_extent.start + count - 1) |
+ policy->l_extent.end = (policy->l_extent.start + bytes - 1) |
~PAGE_MASK;
}
#endif
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
- size_t count)
+ size_t bytes)
{
struct vm_area_struct *vma, *ret = NULL;
struct vma_iterator vmi;
vma_iter_init(&vmi, mm, addr);
for_each_vma(vmi, vma) {
- if (vma->vm_start < (addr + count))
+ if (vma->vm_start < (addr + bytes))
break;
if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
vma->vm_flags & VM_SHARED) {
* If it is in the stride window, return true, otherwise return false.
*/
static bool read_in_stride_window(struct ll_readahead_state *ras,
- loff_t pos, loff_t count)
+ loff_t pos, loff_t bytes)
{
loff_t stride_gap;
/* If it is contiguous read */
if (stride_gap == 0)
- return ras->ras_consecutive_bytes + count <=
+ return ras->ras_consecutive_bytes + bytes <=
ras->ras_stride_bytes;
/* Otherwise check the stride by itself */
return (ras->ras_stride_length - ras->ras_stride_bytes) == stride_gap &&
ras->ras_consecutive_bytes == ras->ras_stride_bytes &&
- count <= ras->ras_stride_bytes;
+ bytes <= ras->ras_stride_bytes;
}
static void ras_init_stride_detector(struct ll_readahead_state *ras,
- loff_t pos, loff_t count)
+ loff_t pos, loff_t bytes)
{
loff_t stride_gap = pos - ras->ras_last_read_end_bytes - 1;
- LASSERT(ras->ras_consecutive_stride_requests == 0);
+ LASSERT(ras->ras_consecutive_stride_requests == 0);
if (pos <= ras->ras_last_read_end_bytes) {
- /*Reset stride window for forward read*/
- ras_stride_reset(ras);
- return;
- }
+ /* Reset stride window for forward read */
+ ras_stride_reset(ras);
+ return;
+ }
ras->ras_stride_bytes = ras->ras_consecutive_bytes;
ras->ras_stride_length = stride_gap + ras->ras_consecutive_bytes;
ras->ras_consecutive_stride_requests++;
ras->ras_stride_offset = pos;
- RAS_CDEBUG(ras);
+ RAS_CDEBUG(ras);
}
static unsigned long
static void ras_detect_read_pattern(struct ll_readahead_state *ras,
struct ll_sb_info *sbi,
- loff_t pos, size_t count, bool mmap)
+ loff_t pos, size_t bytes, bool mmap)
{
bool stride_detect = false;
pgoff_t index = pos >> PAGE_SHIFT;
*/
if (!is_loose_seq_read(ras, pos)) {
/* Check whether it is in stride I/O mode */
- if (!read_in_stride_window(ras, pos, count)) {
+ if (!read_in_stride_window(ras, pos, bytes)) {
if (ras->ras_consecutive_stride_requests == 0)
- ras_init_stride_detector(ras, pos, count);
+ ras_init_stride_detector(ras, pos, bytes);
else
ras_stride_reset(ras);
ras->ras_consecutive_bytes = 0;
* if invalid, it will reset the stride ra window to
* be zero.
*/
- if (!read_in_stride_window(ras, pos, count)) {
+ if (!read_in_stride_window(ras, pos, bytes)) {
ras_stride_reset(ras);
ras->ras_window_pages = 0;
ras->ras_next_readahead_idx = index;
}
}
- ras->ras_consecutive_bytes += count;
+ ras->ras_consecutive_bytes += bytes;
if (mmap) {
pgoff_t idx = ras->ras_consecutive_bytes >> PAGE_SHIFT;
unsigned long ra_range_pages =
ras->ras_need_increase_window = true;
}
- ras->ras_last_read_end_bytes = pos + count - 1;
+ ras->ras_last_read_end_bytes = pos + bytes - 1;
}
-void ll_ras_enter(struct file *f, loff_t pos, size_t count)
+void ll_ras_enter(struct file *f, loff_t pos, size_t bytes)
{
struct ll_file_data *fd = f->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
GOTO(out_unlock, 0);
}
}
- ras_detect_read_pattern(ras, sbi, pos, count, false);
+ ras_detect_read_pattern(ras, sbi, pos, bytes, false);
out_unlock:
spin_unlock(&ras->ras_lock);
}
if (!mmap) {
io_start_index = io->u.ci_rw.crw_pos >> PAGE_SHIFT;
io_end_index = (io->u.ci_rw.crw_pos +
- io->u.ci_rw.crw_count - 1) >> PAGE_SHIFT;
+ io->u.ci_rw.crw_bytes - 1) >> PAGE_SHIFT;
} else {
io_start_index = cl_page_index(page);
io_end_index = cl_page_index(page);
/**
* Total size for the left IO.
*/
- size_t vui_tot_count;
+ size_t vui_tot_bytes;
union {
struct vvp_fault_io {
* top-object and sub-objects.
*/
static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count,
+ struct cl_io *io, loff_t start, size_t bytes,
int *exceed)
{
struct cl_attr *attr = vvp_env_thread_attr(env);
- struct inode *inode = vvp_object_inode(obj);
- loff_t pos = start + count - 1;
+ struct inode *inode = vvp_object_inode(obj);
+ loff_t pos = start + bytes - 1;
loff_t kms;
int result;
struct iovec iov;
struct iov_iter i;
unsigned long addr;
- ssize_t count;
+ ssize_t bytes;
int result = 0;
ENTRY;
iov_iter_advance(&i, iov.iov_len)) {
iov = iov_iter_iovec(&i);
addr = (unsigned long)iov.iov_base;
- count = iov.iov_len;
+ bytes = iov.iov_len;
- if (count == 0)
+ if (bytes == 0)
continue;
- count += addr & ~PAGE_MASK;
+ bytes += addr & ~PAGE_MASK;
addr &= PAGE_MASK;
mmap_read_lock(mm);
- while ((vma = our_vma(mm, addr, count)) != NULL) {
+ while ((vma = our_vma(mm, addr, bytes)) != NULL) {
struct dentry *de = file_dentry(vma->vm_file);
struct inode *inode = de->d_inode;
int flags = CEF_MUST;
* io only ever reads user level buffer, and CIT_READ
* only writes on it.
*/
- policy_from_vma(&policy, vma, addr, count);
+ policy_from_vma(&policy, vma, addr, bytes);
descr->cld_mode = vvp_mode_from_vma(vma);
descr->cld_obj = ll_i2info(inode)->lli_clob;
descr->cld_start = policy.l_extent.start >> PAGE_SHIFT;
if (result < 0)
break;
- if (vma->vm_end - addr >= count)
+ if (vma->vm_end - addr >= bytes)
break;
- count -= vma->vm_end - addr;
+ bytes -= vma->vm_end - addr;
addr = vma->vm_end;
}
mmap_read_unlock(mm);
}
static void vvp_io_advance(const struct lu_env *env,
- const struct cl_io_slice *ios,
- size_t nob)
+ const struct cl_io_slice *ios, size_t bytes)
{
struct cl_object *obj = ios->cis_io->ci_obj;
struct vvp_io *vio = cl2vvp_io(env, ios);
* original position even io succeed, so instead
* of relying on VFS, we move iov iter by ourselves.
*/
- iov_iter_advance(vio->vui_iter, nob);
- CDEBUG(D_VFSTRACE, "advancing %ld bytes\n", nob);
- vio->vui_tot_count -= nob;
- iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
+ iov_iter_advance(vio->vui_iter, bytes);
+ CDEBUG(D_VFSTRACE, "advancing %zu bytes\n", bytes);
+ vio->vui_tot_bytes -= bytes;
+ iov_iter_reexpand(vio->vui_iter, vio->vui_tot_bytes);
}
static void vvp_io_update_iov(const struct lu_env *env,
struct vvp_io *vio, struct cl_io *io)
{
- size_t size = io->u.ci_rw.crw_count;
+ size_t size = io->u.ci_rw.crw_bytes;
if (!vio->vui_iter)
return;
ENTRY;
result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
- rd->crw_pos + rd->crw_count - 1);
+ rd->crw_pos + rd->crw_bytes - 1);
RETURN(result);
}
end = OBD_OBJECT_EOF;
} else {
start = io->u.ci_wr.wr.crw_pos;
- end = start + io->u.ci_wr.wr.crw_count - 1;
+ end = start + io->u.ci_wr.wr.crw_bytes - 1;
}
RETURN(vvp_io_rw_lock(env, io, CLM_WRITE, start, end));
struct ll_inode_info *lli = ll_i2info(inode);
struct file *file = vio->vui_fd->fd_file;
loff_t pos = io->u.ci_rd.rd.crw_pos;
- size_t cnt = io->u.ci_rd.rd.crw_count;
- size_t tot = vio->vui_tot_count;
+ size_t crw_bytes = io->u.ci_rd.rd.crw_bytes;
+ size_t tot_bytes = vio->vui_tot_bytes;
struct ll_cl_context *lcc;
unsigned int seq;
int exceed = 0;
CDEBUG(D_VFSTRACE, "%s: read [%llu, %llu)\n",
file_dentry(file)->d_name.name,
- pos, pos + cnt);
+ pos, pos + crw_bytes);
trunc_sem_down_read(&lli->lli_trunc_sem);
RETURN(0);
if (!(file->f_flags & O_DIRECT)) {
- result = cl_io_lru_reserve(env, io, pos, cnt);
+ result = cl_io_lru_reserve(env, io, pos, crw_bytes);
if (result)
RETURN(result);
}
/* Unless this is reading a sparse file, otherwise the lock has already
* been acquired so vvp_prep_size() is an empty op. */
- result = vvp_prep_size(env, obj, io, pos, cnt, &exceed);
+ result = vvp_prep_size(env, obj, io, pos, crw_bytes, &exceed);
if (result != 0)
RETURN(result);
else if (exceed != 0)
LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
"Read ino %lu, %zu bytes, offset %lld, size %llu\n",
- inode->i_ino, cnt, pos, i_size_read(inode));
+ inode->i_ino, crw_bytes, pos, i_size_read(inode));
/* initialize read-ahead window once per syscall */
if (!vio->vui_ra_valid) {
page_offset = pos & ~PAGE_MASK;
if (page_offset) {
vio->vui_ra_pages++;
- if (tot > PAGE_SIZE - page_offset)
- tot -= (PAGE_SIZE - page_offset);
+ if (tot_bytes > PAGE_SIZE - page_offset)
+ tot_bytes -= (PAGE_SIZE - page_offset);
else
- tot = 0;
+ tot_bytes = 0;
}
- vio->vui_ra_pages += (tot + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ vio->vui_ra_pages += (tot_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
- vio->vui_tot_count, vio->vui_ra_start_idx,
+ vio->vui_tot_bytes, vio->vui_ra_start_idx,
vio->vui_ra_pages);
}
seq = read_seqbegin(&ll_i2info(inode)->lli_page_inv_lock);
result = generic_file_read_iter(vio->vui_iocb, &iter);
if (result >= 0) {
- io->ci_nob += result;
+ io->ci_bytes += result;
total_bytes_read += result;
}
/* if we got a short read or -EIO and we raced with page invalidation,
out:
if (result >= 0) {
- if (total_bytes_read < cnt)
+ if (total_bytes_read < crw_bytes)
io->ci_continue = 0;
result = 0;
} else if (result == -EIOCBQUEUED) {
- io->ci_nob += vio->u.readwrite.vui_read;
+ io->ci_bytes += vio->u.readwrite.vui_read;
vio->vui_iocb->ki_pos = pos + vio->u.readwrite.vui_read;
}
}
static int vvp_io_write_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = vvp_object_inode(obj);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct file *file = vio->vui_fd->fd_file;
- ssize_t result = 0;
- loff_t pos = io->u.ci_wr.wr.crw_pos;
- size_t cnt = io->u.ci_wr.wr.crw_count;
- bool lock_inode = !IS_NOSEC(inode);
- size_t nob = io->ci_nob;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct file *file = vio->vui_fd->fd_file;
+ ssize_t result = 0;
+ loff_t pos = io->u.ci_wr.wr.crw_pos;
+ size_t crw_bytes = io->u.ci_wr.wr.crw_bytes;
+ bool lock_inode = !IS_NOSEC(inode);
+ size_t ci_bytes = io->ci_bytes;
struct iov_iter iter;
size_t written = 0;
LASSERTF(vio->vui_iocb->ki_pos == pos,
"ki_pos %lld [%lld, %lld)\n",
vio->vui_iocb->ki_pos,
- pos, pos + cnt);
+ pos, pos + crw_bytes);
}
CDEBUG(D_VFSTRACE, "%s: write [%llu, %llu)\n",
- file_dentry(file)->d_name.name,
- pos, pos + cnt);
+ file_dentry(file)->d_name.name, pos, pos + crw_bytes);
/* The maximum Lustre file size is variable, based on the OST maximum
* object size and number of stripes. This needs another check in
* addition to the VFS checks earlier. */
- if (pos + cnt > ll_file_maxbytes(inode)) {
+ if (pos + crw_bytes > ll_file_maxbytes(inode)) {
CDEBUG(D_INODE,
"%s: file %s ("DFID") offset %llu > maxbytes %llu\n",
ll_i2sbi(inode)->ll_fsname,
file_dentry(file)->d_name.name,
- PFID(ll_inode2fid(inode)), pos + cnt,
+ PFID(ll_inode2fid(inode)), pos + crw_bytes,
ll_file_maxbytes(inode));
RETURN(-EFBIG);
}
RETURN(-EINVAL);
if (!(file->f_flags & O_DIRECT)) {
- result = cl_io_lru_reserve(env, io, pos, cnt);
+ result = cl_io_lru_reserve(env, io, pos, crw_bytes);
if (result)
RETURN(result);
}
}
if (vio->u.readwrite.vui_written > 0) {
result = vio->u.readwrite.vui_written;
- CDEBUG(D_VFSTRACE, "%s: write nob %zd, result: %zd\n",
+ CDEBUG(D_VFSTRACE, "%s: write bytes %zd, result: %zd\n",
file_dentry(file)->d_name.name,
- io->ci_nob, result);
- io->ci_nob += result;
+ io->ci_bytes, result);
+ io->ci_bytes += result;
} else {
io->ci_continue = 0;
}
}
- if (vio->vui_iocb->ki_pos != (pos + io->ci_nob - nob)) {
+ if (vio->vui_iocb->ki_pos != (pos + io->ci_bytes - ci_bytes)) {
CDEBUG(D_VFSTRACE,
- "%s: write position mismatch: ki_pos %lld vs. pos %lld, written %zd, commit %zd: rc = %zd\n",
+ "%s: write position mismatch: ki_pos %lld vs. pos %lld, written %zd, commit %zd: rc = %zd\n",
file_dentry(file)->d_name.name,
- vio->vui_iocb->ki_pos, pos + io->ci_nob - nob,
- written, io->ci_nob - nob, result);
+ vio->vui_iocb->ki_pos, pos + io->ci_bytes - ci_bytes,
+ written, io->ci_bytes - ci_bytes, result);
/*
* Rewind ki_pos and vui_iter to where it has
* successfully committed.
*/
- vio->vui_iocb->ki_pos = pos + io->ci_nob - nob;
+ vio->vui_iocb->ki_pos = pos + io->ci_bytes - ci_bytes;
}
if (result > 0 || result == -EIOCBQUEUED) {
set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
- if (result != -EIOCBQUEUED && result < cnt)
+ if (result != -EIOCBQUEUED && result < crw_bytes)
io->ci_continue = 0;
if (result > 0)
result = 0;
/* move forward */
if (result == -EIOCBQUEUED) {
- io->ci_nob += vio->u.readwrite.vui_written;
+ io->ci_bytes += vio->u.readwrite.vui_written;
vio->vui_iocb->ki_pos = pos +
vio->u.readwrite.vui_written;
}
}
/*
- * The ft_index is only used in the case of
- * a mkwrite action. We need to check
- * our assertions are correct, since
- * we should have caught this above
+ * The ft_index is only used in the case of mkwrite action. We need to
+ * check our assertions are correct, since we should have caught this
+ * above
*/
LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
if (fio->ft_index == last_index)
- /*
- * Last page is mapped partially.
- */
- fio->ft_nob = size - (fio->ft_index << PAGE_SHIFT);
- else
- fio->ft_nob = PAGE_SIZE;
+ /*
+ * Last page is mapped partially.
+ */
+ fio->ft_bytes = size - (fio->ft_index << PAGE_SHIFT);
+ else
+ fio->ft_bytes = PAGE_SIZE;
- lu_ref_add(&page->cp_reference, "fault", io);
- fio->ft_page = page;
- EXIT;
+ lu_ref_add(&page->cp_reference, "fault", io);
+ fio->ft_page = page;
+ EXIT;
out:
/* return unlocked vmpage to avoid deadlocking */
vio->vui_ra_valid = false;
result = 0;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
- size_t count;
+ size_t bytes;
struct ll_inode_info *lli = ll_i2info(inode);
- count = io->u.ci_rw.crw_count;
+ bytes = io->u.ci_rw.crw_bytes;
/* "If nbyte is 0, read() will return 0 and have no other
* results." -- Single Unix Spec */
- if (count == 0)
+ if (bytes == 0)
result = 1;
else
- vio->vui_tot_count = count;
+ vio->vui_tot_bytes = bytes;
/* for read/write, we store the process jobid/gid/uid in the
* inode, and it'll be fetched by osc when building RPC.
case CIT_READ:
case CIT_WRITE:
lio->lis_pos = io->u.ci_rw.crw_pos;
- lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
+ lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_bytes;
lio->lis_io_endpos = lio->lis_endpos;
if (cl_io_is_append(io)) {
LASSERT(io->ci_type == CIT_WRITE);
io->u.ci_wr.wr_append = 1;
} else {
io->u.ci_rw.crw_pos = start;
- io->u.ci_rw.crw_count = end - start;
+ io->u.ci_rw.crw_bytes = end - start;
}
break;
}
next = min_t(loff_t, next, lio->lis_io_endpos);
io->ci_continue = next < lio->lis_io_endpos;
- io->u.ci_rw.crw_count = next - io->u.ci_rw.crw_pos;
+ io->u.ci_rw.crw_bytes = next - io->u.ci_rw.crw_pos;
lio->lis_pos = io->u.ci_rw.crw_pos;
- lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
+ lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_bytes;
CDEBUG(D_VFSTRACE,
"stripe: %llu chunk: [%llu, %llu) %llu, %zd\n",
(__u64)start, lio->lis_pos, lio->lis_endpos,
- (__u64)lio->lis_io_endpos, io->u.ci_rw.crw_count);
+ (__u64)lio->lis_io_endpos, io->u.ci_rw.crw_bytes);
/*
* XXX The following call should be optimized: we know, that
}
sub = lov_sub_get(env, lio, fio->ft_page->cp_lov_index);
- sub->sub_io.u.ci_fault.ft_nob = fio->ft_nob;
+ sub->sub_io.u.ci_fault.ft_bytes = fio->ft_bytes;
RETURN(lov_io_start(env, ios));
}
* \pre iot == CIT_READ || iot == CIT_WRITE
*/
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, loff_t pos, size_t count)
+ enum cl_io_type iot, loff_t pos, size_t bytes)
{
LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
LINVRNT(io->ci_obj != NULL);
LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
"io range: %u [%llu, %llu) %u %u\n",
- iot, (__u64)pos, (__u64)pos + count,
+ iot, (__u64)pos, (__u64)pos + bytes,
io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
io->u.ci_rw.crw_pos = pos;
- io->u.ci_rw.crw_count = count;
+ io->u.ci_rw.crw_bytes = bytes;
RETURN(cl_io_init(env, io, iot, io->ci_obj));
}
EXPORT_SYMBOL(cl_io_rw_init);
EXPORT_SYMBOL(cl_io_iter_fini);
/**
- * Records that read or write io progressed \a nob bytes forward.
+ * Records that read or write io progressed \a bytes forward.
*/
-void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
+void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t bytes)
{
const struct cl_io_slice *scan;
ENTRY;
LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- nob == 0);
+ bytes == 0);
LINVRNT(cl_io_is_loopable(io));
LINVRNT(cl_io_invariant(io));
- io->u.ci_rw.crw_pos += nob;
- io->u.ci_rw.crw_count -= nob;
+ io->u.ci_rw.crw_pos += bytes;
+ io->u.ci_rw.crw_bytes -= bytes;
/* layers have to be notified. */
list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
- nob);
+ bytes);
}
EXIT;
}
ENTRY;
do {
- size_t nob;
+ size_t bytes;
io->ci_continue = 0;
result = cl_io_iter_init(env, io);
if (result == 0) {
- nob = io->ci_nob;
+ bytes = io->ci_bytes;
result = cl_io_lock(env, io);
if (result == 0) {
/*
*/
cl_io_end(env, io);
cl_io_unlock(env, io);
- cl_io_rw_advance(env, io, io->ci_nob - nob);
+ cl_io_rw_advance(env, io, io->ci_bytes - bytes);
}
}
cl_io_iter_fini(env, io);
io = ios->cis_io;
fio = &io->u.ci_fault;
CDEBUG(D_INFO, "%lu %d %zu\n",
- fio->ft_index, fio->ft_writable, fio->ft_nob);
+ fio->ft_index, fio->ft_writable, fio->ft_bytes);
/*
* If mapping is writeable, adjust kms to cover this page,
* but do not extend kms beyond actual file size.
*/
if (fio->ft_writable)
osc_page_touch_at(env, ios->cis_obj,
- fio->ft_index, fio->ft_nob);
+ fio->ft_index, fio->ft_bytes);
RETURN(0);
}
EXPORT_SYMBOL(osc_io_fault_start);
if (likely(io->ci_type == CIT_WRITE)) {
io_start = io->u.ci_rw.crw_pos >> PAGE_SHIFT;
io_end = (io->u.ci_rw.crw_pos +
- io->u.ci_rw.crw_count - 1) >> PAGE_SHIFT;
+ io->u.ci_rw.crw_bytes - 1) >> PAGE_SHIFT;
} else {
LASSERT(cl_io_is_mkwrite(io));
io_start = io_end = io->u.ci_fault.ft_index;