void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
void cl_page_touch(const struct lu_env *env, const struct cl_page *pg,
size_t to);
-loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
-pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
void cl_lock_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_lock *lock);
descr->cld_obj = io->ci_obj;
/* Convert byte offsets to pages */
- descr->cld_start = cl_index(io->ci_obj, start);
- descr->cld_end = cl_index(io->ci_obj, end);
+ descr->cld_start = start >> PAGE_SHIFT;
+ descr->cld_end = end >> PAGE_SHIFT;
descr->cld_mode = cl_mode;
/* CEF_MUST is used because we do not want to convert a
* lockahead request to a lockless lock */
lock = vvp_env_lock(env);
descr = &lock->cll_descr;
descr->cld_obj = io->ci_obj;
- descr->cld_start = cl_index(io->ci_obj, from);
- descr->cld_end = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
+ descr->cld_start = from >> PAGE_SHIFT;
+ descr->cld_end = (from + PAGE_SIZE - 1) >> PAGE_SHIFT;
descr->cld_mode = CLM_WRITE;
descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
if (vmf->page && result == VM_FAULT_LOCKED) {
ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
current->pid, vma->vm_file->private_data,
- cl_offset(NULL, vmf->page->index), PAGE_SIZE,
+ (loff_t)vmf->page->index << PAGE_SHIFT, PAGE_SIZE,
READ);
ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
LPROC_LL_FAULT,
if (result == VM_FAULT_LOCKED) {
ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
current->pid, vma->vm_file->private_data,
- cl_offset(NULL, vmf->page->index), PAGE_SIZE,
+ (loff_t)vmf->page->index << PAGE_SHIFT, PAGE_SIZE,
WRITE);
ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
LPROC_LL_MKWRITE,
cl_io_fini(env, io);
if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
- loff_t offset = cl_offset(clob, vmpage->index);
+ loff_t offset = (loff_t)vmpage->index << PAGE_SHIFT;
/* Flush page failed because the extent is being written out.
* Wait for the write of extent to be finished to avoid
/* mmap does not set the ci_rw fields */
if (!mmap) {
- io_start_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos);
- io_end_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos +
- io->u.ci_rw.crw_count - 1);
+ io_start_index = io->u.ci_rw.crw_pos >> PAGE_SHIFT;
+ io_end_index = (io->u.ci_rw.crw_pos +
+ io->u.ci_rw.crw_count - 1) >> PAGE_SHIFT;
} else {
io_start_index = cl_page_index(page);
io_end_index = cl_page_index(page);
cl_2queue_init(queue);
for (i = 0; i < pv->ldp_count; i++) {
LASSERT(!(offset & (PAGE_SIZE - 1)));
- page = cl_page_find(env, obj, cl_index(obj, offset),
+ page = cl_page_find(env, obj, offset >> PAGE_SHIFT,
pv->ldp_pages[i], CPT_TRANSIENT);
if (IS_ERR(page)) {
rc = PTR_ERR(page);
{
struct cl_attr *attr = vvp_env_thread_attr(env);
struct cl_object *obj = io->ci_obj;
- loff_t offset = cl_offset(obj, cl_page_index(pg));
- int result;
+ loff_t offset = (loff_t)cl_page_index(pg) << PAGE_SHIFT;
+ int result;
ENTRY;
cl_object_attr_lock(obj);
__u32 enqflags, enum cl_lock_mode mode,
loff_t start, loff_t end)
{
- struct cl_object *obj = io->ci_obj;
-
return vvp_io_one_lock_index(env, io, enqflags, mode,
- cl_index(obj, start), cl_index(obj, end));
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT);
}
static int vvp_io_write_iter_init(const struct lu_env *env,
policy_from_vma(&policy, vma, addr, count);
descr->cld_mode = vvp_mode_from_vma(vma);
descr->cld_obj = ll_i2info(inode)->lli_clob;
- descr->cld_start = cl_index(descr->cld_obj,
- policy.l_extent.start);
- descr->cld_end = cl_index(descr->cld_obj,
- policy.l_extent.end);
+ descr->cld_start = policy.l_extent.start >> PAGE_SHIFT;
+ descr->cld_end = policy.l_extent.end >> PAGE_SHIFT;
descr->cld_enq_flags = flags;
result = cl_io_lock_alloc_add(env, io, descr);
/* initialize read-ahead window once per syscall */
if (!vio->vui_ra_valid) {
vio->vui_ra_valid = true;
- vio->vui_ra_start_idx = cl_index(obj, pos);
+ vio->vui_ra_start_idx = pos >> PAGE_SHIFT;
vio->vui_ra_pages = 0;
page_offset = pos & ~PAGE_MASK;
if (page_offset) {
trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
/* offset of the last byte on the page */
- offset = cl_offset(obj, fio->ft_index + 1) - 1;
- LASSERT(cl_index(obj, offset) == fio->ft_index);
+ offset = ((loff_t)(fio->ft_index + 1) << PAGE_SHIFT) - 1;
+ LASSERT((offset >> PAGE_SHIFT) == fio->ft_index);
result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
if (result != 0)
RETURN(result);
GOTO(out, result = +1);
}
- last_index = cl_index(obj, size - 1);
+ last_index = (size - 1) >> PAGE_SHIFT;
if (fio->ft_mkwrite ) {
/*
/*
* Last page is mapped partially.
*/
- fio->ft_nob = size - cl_offset(obj, fio->ft_index);
+ fio->ft_nob = size - ((loff_t)fio->ft_index << PAGE_SHIFT);
else
fio->ft_nob = PAGE_SIZE;
case CIT_FAULT: {
pgoff_t index = io->u.ci_fault.ft_index;
- lio->lis_pos = cl_offset(io->ci_obj, index);
- lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
+ lio->lis_pos = (loff_t)index << PAGE_SHIFT;
+ lio->lis_endpos = (loff_t)(index + 1) << PAGE_SHIFT;
break;
}
break;
}
case CIT_FAULT: {
- struct cl_object *obj = parent->ci_obj;
- loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);
+ loff_t off = (loff_t)parent->u.ci_fault.ft_index << PAGE_SHIFT;
io->u.ci_fault = parent->u.ci_fault;
off = lov_size_to_stripe(lsm, index, off, stripe);
- io->u.ci_fault.ft_index = cl_index(obj, off);
+ io->u.ci_fault.ft_index = off >> PAGE_SHIFT;
break;
}
case CIT_FSYNC: {
{
struct lov_io *lio = cl2lov_io(env, ios);
struct lov_object *loo = lio->lis_object;
- struct cl_object *obj = lov2cl(loo);
struct lov_layout_raid0 *r0;
struct lov_io_sub *sub;
loff_t offset;
int rc;
ENTRY;
- offset = cl_offset(obj, start);
+ offset = (loff_t)start << PAGE_SHIFT;
index = lov_io_layout_at(lio, offset);
if (index < 0 || !lsm_entry_inited(loo->lo_lsm, index) ||
lsm_entry_is_foreign(loo->lo_lsm, index))
lov_stripe_offset(loo->lo_lsm, index, offset, stripe, &suboff);
rc = cl_io_read_ahead(sub->sub_env, &sub->sub_io,
- cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
- ra);
+ suboff >> PAGE_SHIFT, ra);
CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
PFID(lu_object_fid(lov2lu(loo))), ra->cra_end_idx,
ra_end, stripe);
/* boundary of current component */
- ra_end = cl_index(obj, (loff_t)lov_io_extent(lio, index)->e_end);
+ ra_end = lov_io_extent(lio, index)->e_end >> PAGE_SHIFT;
if (ra_end != CL_PAGE_EOF && ra->cra_end_idx >= ra_end)
ra->cra_end_idx = ra_end - 1;
* refer to another mirror of an old IO.
*/
if (lov_is_flr(lio->lis_object)) {
- offset = cl_offset(ios->cis_obj, fio->ft_index);
+ offset = (loff_t)fio->ft_index << PAGE_SHIFT;
entry = lov_io_layout_at(lio, offset);
if (entry < 0) {
CERROR(DFID": page fault index %lu invalid component: "
LASSERT(ergo(is_trunc, lio->lis_trunc_stripe_index != NULL));
- ext.e_start = cl_offset(obj, lock->cll_descr.cld_start);
+ ext.e_start = (loff_t)lock->cll_descr.cld_start << PAGE_SHIFT;
if (lock->cll_descr.cld_end == CL_PAGE_EOF)
ext.e_end = OBD_OBJECT_EOF;
else
- ext.e_end = cl_offset(obj, lock->cll_descr.cld_end + 1);
+ ext.e_end = ((loff_t)lock->cll_descr.cld_end + 1) << PAGE_SHIFT;
nr = 0;
lov_foreach_io_layout(index, lio, &ext) {
descr = &lls->sub_lock.cll_descr;
LASSERT(descr->cld_obj == NULL);
descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
- descr->cld_start = cl_index(descr->cld_obj, start);
- descr->cld_end = cl_index(descr->cld_obj, end);
+ descr->cld_start = start >> PAGE_SHIFT;
+ descr->cld_end = end >> PAGE_SHIFT;
descr->cld_mode = lock->cll_descr.cld_mode;
descr->cld_gid = lock->cll_descr.cld_gid;
descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
stripe_cached = lio->lis_cached_entry != LIS_CACHE_ENTRY_NONE &&
page->cp_type == CPT_TRANSIENT;
- offset = cl_offset(obj, index);
+ offset = (loff_t)index << PAGE_SHIFT;
if (stripe_cached) {
entry = lio->lis_cached_entry;
cl_object_for_each(o, subobj) {
if (o->co_ops->coo_page_init) {
rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
- cl_index(subobj, suboff));
+ suboff >> PAGE_SHIFT);
if (rc != 0)
break;
}
}
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
+ struct cl_page *cl_page, pgoff_t index)
{
void *addr;
ENTRY;
- BUILD_BUG_ON(!__same_type(page->cp_lov_index, CP_LOV_INDEX_EMPTY));
- page->cp_lov_index = CP_LOV_INDEX_EMPTY;
+ BUILD_BUG_ON(!__same_type(cl_page->cp_lov_index, CP_LOV_INDEX_EMPTY));
+ cl_page->cp_lov_index = CP_LOV_INDEX_EMPTY;
- addr = kmap(page->cp_vmpage);
+ addr = kmap(cl_page->cp_vmpage);
memset(addr, 0, PAGE_SIZE);
- kunmap(page->cp_vmpage);
- SetPageUptodate(page->cp_vmpage);
+ kunmap(cl_page->cp_vmpage);
+ SetPageUptodate(cl_page->cp_vmpage);
RETURN(0);
}
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
/* Destroy pages covered by the extent of the DLM lock */
- result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
+ result = mdc_lock_flush(env, cl2osc(obj), 0,
CL_PAGE_EOF, mode, discard);
/* Losing a lock, set KMS to 0.
* NB: assumed that DOM lock covers whole data on MDT.
/* extend the lock extent, otherwise it will have problem when
* we decide whether to grant a lockless lock. */
descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, 0);
+ descr->cld_start = 0;
descr->cld_end = CL_PAGE_EOF;
/* no lvb update for matched lock */
EXPORT_SYMBOL(cl_page_print);
/**
- * Converts a byte offset within object \a obj into a page index.
- */
-loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
-{
- return (loff_t)idx << PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_offset);
-
-/**
- * Converts a page index into a byte offset within object \a obj.
- */
-pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
-{
- return offset >> PAGE_SHIFT;
-}
-EXPORT_SYMBOL(cl_index);
-
-/**
* Adds page slice to the compound page.
*
* This is called by cl_object_operations::coo_page_init() methods to add a
struct ldlm_extent *extent;
extent = &ext->oe_dlmlock->l_policy_data.l_extent;
- if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) &&
- extent->end >= cl_offset(osc2cl(obj), ext->oe_max_end)))
+ if (!(extent->start <= (loff_t)ext->oe_start << PAGE_SHIFT &&
+ extent->end >= (loff_t)ext->oe_max_end << PAGE_SHIFT))
GOTO(out, rc = 100);
if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP)))
if (result < 0)
return result;
kms = attr->cat_kms;
- if (cl_offset(obj, index) >= kms)
+ if ((loff_t)index << PAGE_SHIFT >= kms)
/* catch race with truncate */
return 0;
- else if (cl_offset(obj, index + 1) > kms)
+ else if ((loff_t)(index + 1) << PAGE_SHIFT > kms)
/* catch sub-page write at end of file */
return kms & ~PAGE_MASK;
else
ENTRY;
/* pages with index greater or equal to index will be truncated. */
- index = cl_index(osc2cl(obj), size);
- partial = size > cl_offset(osc2cl(obj), index);
+ index = size >> PAGE_SHIFT;
+ partial = size > ((loff_t)index << PAGE_SHIFT);
again:
osc_object_lock(obj);
tmp->l_policy_data.l_extent.start;
/* no lock covering this page */
- if (index < cl_index(osc2cl(osc), start)) {
+ if (index < start >> PAGE_SHIFT) {
/* no lock at @index,
* first lock at @start
*/
info->oti_ng_index =
- cl_index(osc2cl(osc), start);
+ start >> PAGE_SHIFT;
discard = true;
} else {
/* Cache the first-non-overlapped
* pages.
*/
info->oti_fn_index =
- cl_index(osc2cl(osc), end + 1);
+ (end + 1) >> PAGE_SHIFT;
if (end == OBD_OBJECT_EOF)
info->oti_fn_index =
CL_PAGE_EOF;
}
ra->cra_rpc_pages = osc_cli(osc)->cl_max_pages_per_rpc;
- ra->cra_end_idx = cl_index(osc2cl(osc),
- dlmlock->l_policy_data.l_extent.end);
+ ra->cra_end_idx =
+ dlmlock->l_policy_data.l_extent.end >> PAGE_SHIFT;
ra->cra_release = osc_read_ahead_release;
ra->cra_dlmlock = dlmlock;
ra->cra_oio = oio;
if (ra->cra_end_idx != CL_PAGE_EOF)
ra->cra_contention = true;
- ra->cra_end_idx = min_t(pgoff_t, ra->cra_end_idx,
- cl_index(osc2cl(osc),
- oinfo->loi_kms - 1));
+ ra->cra_end_idx = min_t(pgoff_t,
+ ra->cra_end_idx,
+ (oinfo->loi_kms - 1) >> PAGE_SHIFT);
result = 0;
}
ENTRY;
/* offset within stripe */
- kms = cl_offset(obj, idx) + to;
+ kms = ((loff_t)idx << PAGE_SHIFT) + to;
cl_object_attr_lock(obj);
CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n",
int partial;
pgoff_t start;
- clob = oio->oi_cl.cis_obj;
- start = cl_index(clob, size);
- partial = cl_offset(clob, start) < size;
+ clob = oio->oi_cl.cis_obj;
+ start = size >> PAGE_SHIFT;
+ partial = ((loff_t)start << PAGE_SHIFT) < size;
/*
* Complain if there are pages in the truncated region.
struct cl_object *obj)
{
struct osc_object *osc = cl2osc(obj);
- pgoff_t pg_start = cl_index(obj, io->u.ci_setattr.sa_falloc_offset);
- pgoff_t pg_end = cl_index(obj, io->u.ci_setattr.sa_falloc_end - 1);
+ pgoff_t pg_start = io->u.ci_setattr.sa_falloc_offset >> PAGE_SHIFT;
+ pgoff_t pg_end = (io->u.ci_setattr.sa_falloc_end - 1) >> PAGE_SHIFT;
int rc;
ENTRY;
struct cl_fsync_io *fio = &io->u.ci_fsync;
struct cl_object *obj = slice->cis_obj;
struct osc_object *osc = cl2osc(obj);
- pgoff_t start = cl_index(obj, fio->fi_start);
- pgoff_t end = cl_index(obj, fio->fi_end);
+ pgoff_t start = fio->fi_start >> PAGE_SHIFT;
+ pgoff_t end = fio->fi_end >> PAGE_SHIFT;
int result = 0;
ENTRY;
{
struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
struct cl_object *obj = slice->cis_obj;
- pgoff_t start = cl_index(obj, fio->fi_start);
- pgoff_t end = cl_index(obj, fio->fi_end);
+ pgoff_t start = fio->fi_start >> PAGE_SHIFT;
+ pgoff_t end = fio->fi_end >> PAGE_SHIFT;
int result = 0;
if (fio->fi_mode == CL_FSYNC_LOCAL) {
/* extend the lock extent, otherwise it will have problem when
* we decide whether to grant a lockless lock. */
descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, ext->start);
- descr->cld_end = cl_index(descr->cld_obj, ext->end);
+ descr->cld_start = ext->start >> PAGE_SHIFT;
+ descr->cld_end = ext->end >> PAGE_SHIFT;
descr->cld_gid = ext->gid;
/* no lvb update for matched lock */
/* Destroy pages covered by the extent of the DLM lock */
result = osc_lock_flush(cl2osc(obj),
- cl_index(obj, extent->start),
- cl_index(obj, extent->end),
+ extent->start >> PAGE_SHIFT,
+ extent->end >> PAGE_SHIFT,
mode, discard);
/* losing a lock, update kms */
if (result != 0)
RETURN(1);
- page_index = cl_index(obj, start);
+ page_index = start >> PAGE_SHIFT;
if (!osc_page_gang_lookup(env, io, oscobj,
- page_index, cl_index(obj, end),
+ page_index, end >> PAGE_SHIFT,
weigh_cb, (void *)&page_index))
result = 1;
cl_io_fini(env, io);
return;
if (likely(io->ci_type == CIT_WRITE)) {
- io_start = cl_index(obj, io->u.ci_rw.crw_pos);
- io_end = cl_index(obj, io->u.ci_rw.crw_pos +
- io->u.ci_rw.crw_count - 1);
+ io_start = io->u.ci_rw.crw_pos >> PAGE_SHIFT;
+ io_end = (io->u.ci_rw.crw_pos +
+ io->u.ci_rw.crw_count - 1) >> PAGE_SHIFT;
} else {
LASSERT(cl_io_is_mkwrite(io));
io_start = io_end = io->u.ci_fault.ft_index;
const struct cl_object *obj, pgoff_t start, pgoff_t end)
{
memset(policy, 0, sizeof *policy);
- policy->l_extent.start = cl_offset(obj, start);
- policy->l_extent.end = cl_offset(obj, end + 1) - 1;
+ policy->l_extent.start = (loff_t)start << PAGE_SHIFT;
+ policy->l_extent.end = ((loff_t)(end + 1) << PAGE_SHIFT) - 1;
}
static int osc_page_print(const struct lu_env *env,
INIT_LIST_HEAD(&opg->ops_lru);
- result = osc_prep_async_page(osc, opg, cl_page, cl_offset(obj, index));
+ result = osc_prep_async_page(osc, opg, cl_page, (loff_t)index << PAGE_SHIFT);
if (result != 0)
return result;