#define CP_STATE_BITS 4
#define CP_TYPE_BITS 2
-#define CP_MAX_LAYER 3
+#define CP_MAX_LAYER 2
/**
* Fields are protected by the lock on struct page, except for atomics and
*/
struct cl_page {
/** Reference counter. */
- atomic_t cp_ref;
+ refcount_t cp_ref;
/** layout_entry + stripe index, composed using lov_comp_index() */
unsigned int cp_lov_index;
/** page->index of the page within the whole file */
/** Linkage of pages within group. Pages must be owned */
struct list_head cp_batch;
/** array of slices offset. Immutable after creation. */
- unsigned char cp_layer_offset[CP_MAX_LAYER]; /* 24 bits */
+ unsigned char cp_layer_offset[CP_MAX_LAYER];
/** current slice index */
- unsigned char cp_layer_count:2; /* 26 bits */
+ unsigned char cp_layer_count:2;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
*/
- enum cl_page_state cp_state:CP_STATE_BITS; /* 30 bits */
+ enum cl_page_state cp_state:CP_STATE_BITS;
/**
* Page type. Only CPT_TRANSIENT is used so far. Immutable after
* creation.
*/
- enum cl_page_type cp_type:CP_TYPE_BITS; /* 32 bits */
+ enum cl_page_type cp_type:CP_TYPE_BITS;
+ unsigned int cp_defer_uptodate:1,
+ cp_ra_updated:1,
+ cp_ra_used:1;
/* which slab kmem index this memory allocated from */
- short int cp_kmem_index; /* 48 bits */
- unsigned int cp_unused1:16; /* 64 bits */
+ short int cp_kmem_index;
/**
* Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
*/
struct cl_page_slice {
struct cl_page *cpl_page;
- /**
- * Object slice corresponding to this page slice. Immutable after
- * creation.
- */
- struct cl_object *cpl_obj;
const struct cl_page_operations *cpl_ops;
};
*
* Methods taking an \a io argument are for the activity happening in the
* context of given \a io. Page is assumed to be owned by that io, except for
- * the obvious cases (like cl_page_operations::cpo_own()).
+ * the obvious cases.
*
* \see vvp_page_ops, lov_page_ops, osc_page_ops
*/
/**
* cl_page<->struct page methods. Only one layer in the stack has to
* implement these. Current code assumes that this functionality is
- * provided by the topmost layer, see cl_page_disown0() as an example.
- */
-
- /**
- * Called when \a io acquires this page into the exclusive
- * ownership. When this method returns, it is guaranteed that the is
- * not owned by other io, and no transfer is going on against
- * it. Optional.
- *
- * \see cl_page_own()
- * \see vvp_page_own(), lov_page_own()
- */
- int (*cpo_own)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, int nonblock);
- /** Called when ownership it yielded. Optional.
- *
- * \see cl_page_disown()
- * \see vvp_page_disown()
- */
- void (*cpo_disown)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
- * Called for a page that is already "owned" by \a io from VM point of
- * view. Optional.
- *
- * \see cl_page_assume()
- * \see vvp_page_assume(), lov_page_assume()
- */
- void (*cpo_assume)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /** Dual to cl_page_operations::cpo_assume(). Optional. Called
- * bottom-to-top when IO releases a page without actually unlocking
- * it.
- *
- * \see cl_page_unassume()
- * \see vvp_page_unassume()
- */
- void (*cpo_unassume)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
- * Announces whether the page contains valid data or not by \a uptodate.
- *
- * \see cl_page_export()
- * \see vvp_page_export()
+ * provided by the topmost layer, see __cl_page_disown() as an example.
*/
- void (*cpo_export)(const struct lu_env *env,
- const struct cl_page_slice *slice, int uptodate);
- /**
- * Checks whether underlying VM page is locked (in the suitable
- * sense). Used for assertions.
- *
- * \retval -EBUSY: page is protected by a lock of a given mode;
- * \retval -ENODATA: page is not protected by a lock;
- * \retval 0: this layer cannot decide. (Should never happen.)
- */
- int (*cpo_is_vmlocked)(const struct lu_env *env,
- const struct cl_page_slice *slice);
/**
* Update file attributes when all we have is this page. Used for tiny
*/
void (*cpo_delete)(const struct lu_env *env,
const struct cl_page_slice *slice);
- /** Destructor. Frees resources and slice itself. */
- void (*cpo_fini)(const struct lu_env *env,
- struct cl_page_slice *slice,
- struct pagevec *pvec);
/**
* Optional debugging helper. Prints given page slice.
*
*/
struct {
/**
- * Called when a page is submitted for a transfer as a part of
- * cl_page_list.
- *
- * \return 0 : page is eligible for submission;
- * \return -EALREADY : skip this page;
- * \return -ve : error.
- *
- * \see cl_page_prep()
- */
- int (*cpo_prep)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
- /**
* Completion handler. This is guaranteed to be eventually
- * fired after cl_page_operations::cpo_prep() or
- * cl_page_operations::cpo_make_ready() call.
+ * fired after a cl_page_prep() or cl_page_make_ready() call.
*
* This method can be called in a non-blocking context. It is
* guaranteed however, that the page involved and its object
void (*cpo_completion)(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret);
- /**
- * Called when cached page is about to be added to the
- * ptlrpc request as a part of req formation.
- *
- * \return 0 : proceed with this page;
- * \return -EAGAIN : skip this page;
- * \return -ve : error.
- *
- * \see cl_page_make_ready()
- */
- int (*cpo_make_ready)(const struct lu_env *env,
- const struct cl_page_slice *slice);
} io[CRT_NR];
/**
* Tell transfer engine that only [to, from] part of a page should be
return page->cp_vmpage;
}
+static inline pgoff_t cl_page_index(const struct cl_page *cp)
+{
+ return cl_page_vmpage(cp)->index;
+}
+
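For context, a minimal call-site sketch of the new helper (illustrative only, not part of the patch; the "before" helpers are the ones removed later in this series, and the llite conversions below follow this pattern):

	/* Before (removed by this series):
	 *   struct vvp_page *vpg = cl2vvp_page(cl_object_page_slice(obj, page));
	 *   pgoff_t idx = vvp_index(vpg);
	 * After, the index comes straight from the cached vmpage:
	 */
	pgoff_t idx = cl_page_index(page);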
/**
* Check if a cl_page is in use.
*
*/
static inline bool __page_in_use(const struct cl_page *page, int refc)
{
- return (atomic_read(&page->cp_ref) > refc + 1);
+ return (refcount_read(&page->cp_ref) > refc + 1);
}
/**
lu_printer_t printer,
const struct cl_page *pg);
struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
-struct cl_page *cl_page_top (struct cl_page *page);
-
-const struct cl_page_slice *cl_page_at(const struct cl_page *page,
- const struct lu_device_type *dtype);
/**
* \name ownership
void cl_page_discard(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
-int cl_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page *pg);
void cl_page_touch(const struct lu_env *env, const struct cl_page *pg,
size_t to);
-void cl_page_export(const struct lu_env *env,
- struct cl_page *pg, int uptodate);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
size_t cl_page_size(const struct cl_object *obj);
* # of client cache refcount
* # of users (OSCs) + 2 (held by llite and lov)
*/
- atomic_t ccc_users;
+ refcount_t ccc_users;
/**
* # of threads are doing shrinking
*/
void cl_page_list_del(const struct lu_env *env,
struct cl_page_list *plist, struct cl_page *page);
void cl_page_list_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
+ struct cl_page_list *plist);
void cl_page_list_assume(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
void cl_page_list_discard(const struct lu_env *env,
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
void cl_2queue_init(struct cl_2queue *queue);
-void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page,
- bool get_ref);
-void cl_2queue_disown(const struct lu_env *env, struct cl_io *io,
- struct cl_2queue *queue);
+void cl_2queue_disown(const struct lu_env *env, struct cl_2queue *queue);
void cl_2queue_assume(const struct lu_env *env, struct cl_io *io,
struct cl_2queue *queue);
void cl_2queue_discard(const struct lu_env *env, struct cl_io *io,
struct brw_page oap_brw_page;
struct ptlrpc_request *oap_request;
- struct client_obd *oap_cli;
struct osc_object *oap_obj;
spinlock_t oap_lock;
return opg->ops_oap.oap_obj_off >> PAGE_SHIFT;
}
+static inline struct osc_object *osc_page_object(struct osc_page *ops)
+{
+ return ops->ops_oap.oap_obj;
+}
+
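A hedged one-liner for out-of-tree callers, showing how the osc_object is now reached through the accessor added above (opg is a placeholder variable):

	/* was: opg->ops_oap.oap_obj */
	struct osc_object *osc = osc_page_object(opg);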
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
{
return oap2osc(oap)->ops_cl.cpl_page;
put_page(vmpage);
break;
}
- cl_page_export(env, page, 1);
+ SetPageUptodate(vmpage);
cl_page_put(env, page);
unlock_page(vmpage);
put_page(vmpage);
anchor = &vvp_env_info(env)->vti_anchor;
cl_sync_io_init(anchor, 1);
clpage->cp_sync_io = anchor;
- cl_2queue_add(queue, clpage, true);
+ cl_page_list_add(&queue->c2_qin, clpage, true);
rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
if (rc)
GOTO(queuefini1, rc);
queuefini2:
cl_2queue_discard(env, io, queue);
queuefini1:
- cl_2queue_disown(env, io, queue);
+ cl_2queue_disown(env, queue);
cl_2queue_fini(env, queue);
}
"reclaim_count: %u\n"
"max_read_ahead_mb: %lu\n"
"used_read_ahead_mb: %d\n",
- atomic_read(&cache->ccc_users),
+ refcount_read(&cache->ccc_users),
max_cached_mb,
max_cached_mb - unused_mb,
unused_mb,
struct cl_object *clob = io->ci_obj;
struct inode *inode = vvp_object_inode(clob);
struct page *vmpage = NULL;
- struct cl_page *page;
- struct vvp_page *vpg;
+ struct cl_page *cp;
enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
int rc = 0;
const char *msg = NULL;
/* should not come here */
GOTO(out, rc = -EINVAL);
}
-
+
/* Check if vmpage was truncated or reclaimed */
if (vmpage->mapping != inode->i_mapping) {
which = RA_STAT_WRONG_GRAB_PAGE;
GOTO(out, rc = -EBUSY);
}
- page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
- if (IS_ERR(page)) {
+ cp = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(cp)) {
which = RA_STAT_FAILED_GRAB_PAGE;
msg = "cl_page_find failed";
- GOTO(out, rc = PTR_ERR(page));
+ GOTO(out, rc = PTR_ERR(cp));
}
- lu_ref_add(&page->cp_reference, "ra", current);
- cl_page_assume(env, io, page);
- vpg = cl2vvp_page(cl_object_page_slice(clob, page));
- if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
+ lu_ref_add(&cp->cp_reference, "ra", current);
+ cl_page_assume(env, io, cp);
+
+ if (!cp->cp_defer_uptodate && !PageUptodate(vmpage)) {
if (hint == MAYNEED) {
- vpg->vpg_defer_uptodate = 1;
- vpg->vpg_ra_used = 0;
+ cp->cp_defer_uptodate = 1;
+ cp->cp_ra_used = 0;
}
- cl_page_list_add(queue, page, true);
+
+ cl_page_list_add(queue, cp, true);
} else {
/* skip completed pages */
- cl_page_unassume(env, io, page);
+ cl_page_unassume(env, io, cp);
/* This page is already uptodate, returning a positive number
* to tell the callers about this */
rc = 1;
}
- lu_ref_del(&page->cp_reference, "ra", current);
- cl_page_put(env, page);
+ lu_ref_del(&cp->cp_reference, "ra", current);
+ cl_page_put(env, cp);
out:
if (vmpage != NULL) {
cl_page_list_discard(env, io, &queue->c2_qin);
/* Unlock unsent read pages in case of error. */
- cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_page_list_disown(env, &queue->c2_qin);
cl_2queue_fini(env, queue);
out_io_fini:
struct ll_readahead_state *ras = NULL;
struct cl_2queue *queue = &io->ci_queue;
struct cl_sync_io *anchor = NULL;
- struct vvp_page *vpg;
int rc = 0, rc2 = 0;
bool uptodate;
struct vvp_io *vio = vvp_env_io(env);
if (page->cp_vmpage && PagePrivate2(page->cp_vmpage))
unlockpage = false;
- vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
- uptodate = vpg->vpg_defer_uptodate;
+ uptodate = page->cp_defer_uptodate;
- if (ll_readahead_enabled(sbi) && !vpg->vpg_ra_updated && ras) {
+ if (ll_readahead_enabled(sbi) && !page->cp_ra_updated && ras) {
enum ras_update_flags flags = 0;
if (uptodate)
flags |= LL_RAS_HIT;
if (mmap)
flags |= LL_RAS_MMAP;
- ras_update(sbi, inode, ras, vvp_index(vpg), flags, io);
+ ras_update(sbi, inode, ras, cl_page_index(page), flags, io);
}
cl_2queue_init(queue);
if (uptodate) {
- vpg->vpg_ra_used = 1;
- cl_page_export(env, page, 1);
+ page->cp_ra_used = 1;
+ SetPageUptodate(page->cp_vmpage);
cl_page_disown(env, io, page);
} else {
anchor = &vvp_env_info(env)->vti_anchor;
cl_sync_io_init(anchor, 1);
page->cp_sync_io = anchor;
- cl_2queue_add(queue, page, true);
+ cl_page_list_add(&queue->c2_qin, page, true);
}
/* mmap does not set the ci_rw fields */
io_end_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos +
io->u.ci_rw.crw_count - 1);
} else {
- io_start_index = vvp_index(vpg);
- io_end_index = vvp_index(vpg);
+ io_start_index = cl_page_index(page);
+ io_end_index = cl_page_index(page);
}
if (ll_readahead_enabled(sbi) && ras && !io->ci_rand_read) {
pgoff_t skip_index = 0;
- if (ras->ras_next_readahead_idx < vvp_index(vpg))
- skip_index = vvp_index(vpg);
+ if (ras->ras_next_readahead_idx < cl_page_index(page))
+ skip_index = cl_page_index(page);
rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
uptodate, file, skip_index,
&ra_start_index);
CDEBUG(D_READA | (rc2 ? D_IOTRACE : 0),
DFID " %d pages read ahead at %lu, triggered by user read at %lu, stride offset %lld, stride length %lld, stride bytes %lld\n",
PFID(ll_inode2fid(inode)), rc2, ra_start_index,
- vvp_index(vpg), ras->ras_stride_offset,
+ cl_page_index(page), ras->ras_stride_offset,
ras->ras_stride_length, ras->ras_stride_bytes);
- } else if (vvp_index(vpg) == io_start_index &&
+ } else if (cl_page_index(page) == io_start_index &&
io_end_index - io_start_index > 0) {
rc2 = ll_readpages(env, io, &queue->c2_qin, io_start_index + 1,
io_end_index);
CDEBUG(D_READA, DFID " %d pages read at %lu\n",
- PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
+ PFID(ll_inode2fid(inode)), rc2, cl_page_index(page));
}
if (queue->c2_qin.pl_nr > 0) {
cl_page_list_discard(env, io, &queue->c2_qin);
/* Unlock unsent read pages in case of error. */
- cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_page_list_disown(env, &queue->c2_qin);
cl_2queue_fini(env, queue);
struct ll_file_data *fd = file->private_data;
struct ll_readahead_state *ras = &fd->fd_ras;
struct lu_env *local_env = NULL;
- struct vvp_page *vpg;
CDEBUG(D_VFSTRACE, "fast read pgno: %ld\n", vmpage->index);
RETURN(result);
}
- vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
- if (vpg->vpg_defer_uptodate) {
+ if (page->cp_defer_uptodate) {
enum ras_update_flags flags = LL_RAS_HIT;
if (lcc && lcc->lcc_type == LCC_MMAP)
/* For fast read, it updates read ahead state only
* if the page is hit in cache because non cache page
* case will be handled by slow read later. */
- ras_update(sbi, inode, ras, vvp_index(vpg), flags, io);
+ ras_update(sbi, inode, ras, cl_page_index(page), flags, io);
/* avoid duplicate ras_update() call */
- vpg->vpg_ra_updated = 1;
+ page->cp_ra_updated = 1;
- if (ll_use_fast_io(file, ras, vvp_index(vpg)))
+ if (ll_use_fast_io(file, ras, cl_page_index(page)))
result = 0;
}
- /* export the page and skip io stack */
+ /* mark the page uptodate and skip the io stack */
if (result == 0) {
- vpg->vpg_ra_used = 1;
- cl_page_export(env, page, 1);
+ page->cp_ra_used = 1;
+ SetPageUptodate(vmpage);
} else {
ll_ra_stats_inc_sbi(sbi, RA_STAT_FAILED_FAST_READ);
}
+
/* release page refcount before unlocking the page to ensure
* the object won't be destroyed in the calling path of
* cl_page_put(). Please see comment in ll_releasepage(). */
/* We keep the refcount from cl_page_find, so we don't need
* another one here
*/
- cl_2queue_add(queue, page, false);
+ cl_page_list_add(&queue->c2_qin, page, false);
/*
* Set page clip to tell transfer formation engine
* that page has to be sent even if it is beyond KMS.
}
cl_2queue_discard(env, io, queue);
- cl_2queue_disown(env, io, queue);
+ cl_2queue_disown(env, queue);
cl_2queue_fini(env, queue);
RETURN(rc);
}
{
struct cl_attr *attr = vvp_env_thread_attr(env);
struct cl_object *obj = io->ci_obj;
- struct vvp_page *vpg = cl_object_page_slice(obj, pg);
- loff_t offset = cl_offset(obj, vvp_index(vpg));
+ loff_t offset = cl_offset(obj, cl_page_index(pg));
int result;
ENTRY;
* purposes here we can treat it like i_size.
*/
if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(vpg->vpg_page);
+ char *kaddr = kmap_atomic(pg->cp_vmpage);
memset(kaddr, 0, cl_page_size(obj));
kunmap_atomic(kaddr);
GOTO(out, result = 0);
}
- if (vpg->vpg_defer_uptodate) {
- vpg->vpg_ra_used = 1;
+ if (pg->cp_defer_uptodate) {
+ pg->cp_ra_used = 1;
GOTO(out, result = 0);
}
static void vvp_pgcache_page_show(const struct lu_env *env,
struct seq_file *seq, struct cl_page *page)
{
- struct vvp_page *vpg;
- struct page *vmpage;
- int has_flags;
+ struct page *vmpage;
+ int has_flags;
- vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
- vmpage = vpg->vpg_page;
- seq_printf(seq, " %5i | %p %p %s %s %s | %p "DFID"(%p) %lu %u [",
+ vmpage = page->cp_vmpage;
+ seq_printf(seq, " %5i | %p %p %s %s | %p "DFID"(%p) %lu %u [",
0 /* gen */,
- vpg, page,
+ NULL, /* was vvp_page */
+ page,
"none",
- vpg->vpg_defer_uptodate ? "du" : "- ",
PageWriteback(vmpage) ? "wb" : "-",
vmpage,
PFID(ll_inode2fid(vmpage->mapping->host)),
};
/**
- * VVP-private page state.
+ * There is no VVP-private page state.
*/
-struct vvp_page {
- struct cl_page_slice vpg_cl;
- unsigned vpg_defer_uptodate:1,
- vpg_ra_updated:1,
- vpg_ra_used:1;
- /** VM page */
- struct page *vpg_page;
-};
-
-static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
-{
- return container_of(slice, struct vvp_page, vpg_cl);
-}
-
-static inline pgoff_t vvp_index(struct vvp_page *vpg)
-{
- return vpg->vpg_page->index;
-}
struct vvp_device {
struct cl_device vdv_cl;
int vvp_object_invariant(const struct cl_object *obj);
struct vvp_object *cl_inode2vvp(struct inode *inode);
-static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
-{
- return cl2vvp_page(slice)->vpg_page;
-}
-
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define CLOBINVRNT(env, clob, expr) \
do { \
pgoff_t index = CL_PAGE_EOF;
cl_page_list_for_each(page, plist) {
- struct vvp_page *vpg = cl_object_page_slice(obj, page);
-
if (index == CL_PAGE_EOF) {
- index = vvp_index(vpg);
+ index = cl_page_index(page);
continue;
}
++index;
- if (index == vvp_index(vpg))
+ if (index == cl_page_index(page))
continue;
return false;
wait_on_page_writeback(vmpage);
if (!PageDirty(vmpage)) {
struct cl_page_list *plist = &vio->u.fault.ft_queue;
- struct vvp_page *vpg = cl_object_page_slice(obj, page);
int to = PAGE_SIZE;
- /* vvp_page_assume() calls wait_on_page_writeback(). */
+ /* cl_page_assume() calls wait_on_page_writeback(). */
cl_page_list_add(plist, page, true);
/* size fixup */
- if (last_index == vvp_index(vpg))
+ if (last_index == cl_page_index(page))
to = ((size - 1) & ~PAGE_MASK) + 1;
/* Do not set Dirty bit here so that in case IO is
const struct cl_object_conf *conf)
{
vob->vob_inode = conf->coc_inode;
- cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
+ cl_object_page_init(&vob->vob_cl, sizeof(struct cl_page_slice));
return 0;
}
* Page operations.
*
*/
-static void vvp_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice,
- struct pagevec *pvec)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct page *vmpage = vpg->vpg_page;
-
- /*
- * vmpage->private was already cleared when page was moved into
- * VPG_FREEING state.
- */
- LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
- LASSERT(vmpage != NULL);
- if (pvec) {
- if (!pagevec_add(pvec, vmpage))
- pagevec_release(pvec);
- } else {
- put_page(vmpage);
- }
-}
-
-static int vvp_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io,
- int nonblock)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct page *vmpage = vpg->vpg_page;
-
- ENTRY;
-
- LASSERT(vmpage != NULL);
- if (nonblock) {
- if (!trylock_page(vmpage))
- return -EAGAIN;
-
- if (unlikely(PageWriteback(vmpage))) {
- unlock_page(vmpage);
- return -EAGAIN;
- }
-
- return 0;
- }
-
- lock_page(vmpage);
- wait_on_page_writeback(vmpage);
-
- RETURN(0);
-}
-
-static void vvp_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
- wait_on_page_writeback(vmpage);
-}
-
-static void vvp_page_unassume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
-}
-
-static void vvp_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- ENTRY;
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
-
- unlock_page(cl2vm_page(slice));
-
- EXIT;
-}
-
static void vvp_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- struct page *vmpage = cl2vm_page(slice);
- struct vvp_page *vpg = cl2vvp_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
+ struct cl_page *cp = slice->cpl_page;
+ struct page *vmpage = cp->cp_vmpage;
- if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used && vmpage->mapping)
+ if (cp->cp_defer_uptodate && !cp->cp_ra_used && vmpage->mapping != NULL)
ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
-
- generic_error_remove_page(vmpage->mapping, vmpage);
-}
-
-static void vvp_page_export(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int uptodate)
-{
- struct page *vmpage = cl2vm_page(slice);
-
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
- if (uptodate)
- SetPageUptodate(vmpage);
- else
- ClearPageUptodate(vmpage);
-}
-
-static int vvp_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
-}
-
-static int vvp_page_prep_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- ENTRY;
- /* Skip the page already marked as PG_uptodate. */
- RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
-}
-
-static int vvp_page_prep_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct cl_page *pg = slice->cpl_page;
-
- LASSERT(PageLocked(vmpage));
- LASSERT(!PageDirty(vmpage));
-
- /* ll_writepage path is not a sync write, so need to set page writeback
- * flag
- */
- if (pg->cp_sync_io == NULL)
- set_page_writeback(vmpage);
-
- return 0;
}
static void vvp_page_delete(const struct lu_env *env,
LASSERT((struct cl_page *)vmpage->private == cp);
/* Drop the reference count held in vvp_page_init */
- atomic_dec(&cp->cp_ref);
+ refcount_dec(&cp->cp_ref);
ClearPagePrivate(vmpage);
vmpage->private = 0;
const struct cl_page_slice *slice,
int ioret)
{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct page *vmpage = vpg->vpg_page;
- struct cl_page *page = slice->cpl_page;
- struct inode *inode = vvp_object_inode(page->cp_obj);
+ struct cl_page *cp = slice->cpl_page;
+ struct page *vmpage = cp->cp_vmpage;
+ struct inode *inode = vvp_object_inode(cp->cp_obj);
ENTRY;
LASSERT(PageLocked(vmpage));
- CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
+ CL_PAGE_HEADER(D_PAGE, env, cp, "completing READ with %d\n", ioret);
- if (vpg->vpg_defer_uptodate)
+ if (cp->cp_defer_uptodate)
ll_ra_count_put(ll_i2sbi(inode), 1);
if (ioret == 0) {
- if (!vpg->vpg_defer_uptodate)
- cl_page_export(env, page, 1);
- } else if (vpg->vpg_defer_uptodate) {
- vpg->vpg_defer_uptodate = 0;
+ if (!cp->cp_defer_uptodate)
+ SetPageUptodate(vmpage);
+ } else if (cp->cp_defer_uptodate) {
+ cp->cp_defer_uptodate = 0;
if (ioret == -EAGAIN) {
/* mirror read failed, it needs to destroy the page
* because subpage would be from wrong osc when trying
}
}
- if (page->cp_sync_io == NULL)
+ if (cp->cp_sync_io == NULL)
unlock_page(vmpage);
EXIT;
const struct cl_page_slice *slice,
int ioret)
{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = vpg->vpg_page;
+ struct cl_page *cp = slice->cpl_page;
+ struct page *vmpage = cp->cp_vmpage;
ENTRY;
- CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
+ CL_PAGE_HEADER(D_PAGE, env, cp, "completing WRITE with %d\n", ioret);
- if (pg->cp_sync_io != NULL) {
+ if (cp->cp_sync_io != NULL) {
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
} else {
* Only mark the page error only when it's an async write
* because applications won't wait for IO to finish.
*/
- vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
+ vvp_vmpage_error(vvp_object_inode(cp->cp_obj), vmpage, ioret);
end_page_writeback(vmpage);
}
EXIT;
}
-/**
- * Implements cl_page_operations::cpo_make_ready() method.
- *
- * This is called to yank a page from the transfer cache and to send it out as
- * a part of transfer. This function try-locks the page. If try-lock failed,
- * page is owned by some concurrent IO, and should be skipped (this is bad,
- * but hopefully rare situation, as it usually results in transfer being
- * shorter than possible).
- *
- * \retval 0 success, page can be placed into transfer
- *
- * \retval -EAGAIN page is either used by concurrent IO has been
- * truncated. Skip it.
- */
-static int vvp_page_make_ready(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- struct page *vmpage = cl2vm_page(slice);
- struct cl_page *pg = slice->cpl_page;
- int result = 0;
-
- lock_page(vmpage);
- if (clear_page_dirty_for_io(vmpage)) {
- LASSERT(pg->cp_state == CPS_CACHED);
- /* This actually clears the dirty bit in the radix
- * tree.
- */
- set_page_writeback(vmpage);
- CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
- } else if (pg->cp_state == CPS_PAGEOUT) {
- /* is it possible for osc_flush_async_page() to already
- * make it ready?
- */
- result = -EALREADY;
- } else {
- CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
- pg->cp_state);
- LBUG();
- }
- unlock_page(vmpage);
- RETURN(result);
-}
-
-static int vvp_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct vvp_page *vpg = cl2vvp_page(slice);
- struct page *vmpage = vpg->vpg_page;
-
- (*printer)(env, cookie,
- LUSTRE_VVP_NAME"-page@%p(%d:%d) vm@%p ",
- vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
-
- if (vmpage != NULL) {
- (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
- (long)vmpage->flags, page_count(vmpage),
- page_mapcount(vmpage), vmpage->private,
- page_index(vmpage),
- list_empty(&vmpage->lru) ? "not-" : "");
- }
-
- (*printer)(env, cookie, "\n");
-
- return 0;
-}
-
-static int vvp_page_fail(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- /*
- * Cached read?
- */
- LBUG();
-
- return 0;
-}
-
static const struct cl_page_operations vvp_page_ops = {
- .cpo_own = vvp_page_own,
- .cpo_assume = vvp_page_assume,
- .cpo_unassume = vvp_page_unassume,
- .cpo_disown = vvp_page_disown,
+ .cpo_delete = vvp_page_delete,
.cpo_discard = vvp_page_discard,
- .cpo_delete = vvp_page_delete,
- .cpo_export = vvp_page_export,
- .cpo_is_vmlocked = vvp_page_is_vmlocked,
- .cpo_fini = vvp_page_fini,
- .cpo_print = vvp_page_print,
.io = {
[CRT_READ] = {
- .cpo_prep = vvp_page_prep_read,
.cpo_completion = vvp_page_completion_read,
- .cpo_make_ready = vvp_page_fail,
},
[CRT_WRITE] = {
- .cpo_prep = vvp_page_prep_write,
.cpo_completion = vvp_page_completion_write,
- .cpo_make_ready = vvp_page_make_ready,
},
},
};
-static void vvp_transient_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct cl_page *page = slice->cpl_page;
-
- /*
- * For transient pages, remove it from the radix tree.
- */
- cl_page_delete(env, page);
-}
-
-static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return -EBUSY;
-}
-
static const struct cl_page_operations vvp_transient_page_ops = {
- .cpo_discard = vvp_transient_page_discard,
- .cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
- .cpo_print = vvp_page_print,
};
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index)
{
- struct vvp_page *vpg = cl_object_page_slice(obj, page);
- struct page *vmpage = page->cp_vmpage;
+ struct cl_page_slice *cpl = cl_object_page_slice(obj, page);
+ struct page *vmpage = page->cp_vmpage;
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- vpg->vpg_page = vmpage;
-
if (page->cp_type == CPT_TRANSIENT) {
/* DIO pages are referenced by userspace, we don't need to take
* a reference on them. (contrast with get_page() call above)
*/
- cl_page_slice_add(page, &vpg->vpg_cl, obj,
+ cl_page_slice_add(page, cpl, obj,
&vvp_transient_page_ops);
} else {
get_page(vmpage);
- /* in cache, decref in vvp_page_delete */
- atomic_inc(&page->cp_ref);
+ /* in cache, decref in cl_page_delete() */
+ refcount_inc(&page->cp_ref);
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &vpg->vpg_cl, obj,
- &vvp_page_ops);
+ cl_page_slice_add(page, cpl, obj, &vvp_page_ops);
}
return 0;
/** \defgroup lov lov
* Logical object volume layer. This layer implements data striping (raid0).
*
- * At the lov layer top-entity (object, page, lock, io) is connected to one or
+ * At the lov layer top-entity (object, lock, io) is connected to one or
* more sub-entities: top-object, representing a file is connected to a set of
* sub-objects, each representing a stripe, file-level top-lock is connected
- * to a set of per-stripe sub-locks, top-page is connected to a (single)
- * sub-page, and a top-level IO is connected to a set of (potentially
- * concurrent) sub-IO's.
+ * to a set of per-stripe sub-locks, and a top-level IO is connected to a set of
+ * (potentially concurrent) sub-IO's.
*
- * Sub-object, sub-page, and sub-io have well-defined top-object and top-page
+ * Sub-object and sub-io have well-defined top-object and top-io
* respectively, while a single sub-lock can be part of multiple top-locks.
*
* Reference counting models are different for different types of entities:
* - top-object keeps a reference to its sub-objects, and destroys them
* when it is destroyed.
*
- * - top-page keeps a reference to its sub-page, and destroys it when it
- * is destroyed.
- *
* - IO's are not reference counted.
*
* To implement a connection between top and sub entities, lov layer is split
struct lov_lock_sub lls_sub[0];
};
-struct lov_page {
- struct cl_page_slice lps_cl;
-};
-
/*
* Bottom half.
*/
struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
int stripe);
+enum {
+ CP_LOV_INDEX_EMPTY = -1U,
+};
+
+static inline bool lov_page_is_empty(const struct cl_page *cp)
+{
+ return cp->cp_lov_index == CP_LOV_INDEX_EMPTY;
+}
+
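A hedged sketch of how a caller can use the new check; the guard mirrors what lov_page_init_empty() sets up below (zero-filled page, cp_lov_index == CP_LOV_INDEX_EMPTY), while the surrounding function is hypothetical:

	/* Hypothetical caller: pages of an empty/released layout carry no
	 * stripe data, so per-stripe work is skipped for them. */
	static void example_skip_empty_page(struct cl_page *page)
	{
		if (lov_page_is_empty(page))
			return;
		/* ... map page->cp_lov_index to an entry/stripe sub-io ... */
	}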
int lov_page_init_empty (const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index);
int lov_page_init_composite(const struct lu_env *env, struct cl_object *obj,
const struct lu_object_header *hdr,
struct lu_device *dev);
-int lov_page_stripe(const struct cl_page *page);
-bool lov_page_is_empty(const struct cl_page *page);
int lov_lsm_entry(const struct lov_stripe_md *lsm, __u64 offset);
int lov_io_layout_at(struct lov_io *lio, __u64 offset);
return container_of(slice, struct lov_lock, lls_cl);
}
-static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
-{
- LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
- return container_of(slice, struct lov_page, lps_cl);
-}
-
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_object *o;
if (stripe == NULL)
- return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off -
- cfs_size_round(sizeof(struct lov_page));
+ return hdr->coh_page_bufsize - lov->lo_cl.co_slice_off;
cl_object_for_each(o, stripe)
o->co_slice_off += hdr->coh_page_bufsize;
init_rwsem(&lov->lo_type_guard);
atomic_set(&lov->lo_active_ios, 0);
init_waitqueue_head(&lov->lo_waitq);
- cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
+ cl_object_page_init(lu2cl(obj), 0);
lov->lo_type = LLT_EMPTY;
if (cconf->u.coc_layout.lb_buf != NULL) {
#define DEBUG_SUBSYSTEM S_LOV
#include "lov_cl_internal.h"
+#include <linux/bug.h>
+#include <linux/compiler.h>
/** \addtogroup lov
* @{
* Lov page operations.
*
*/
-
-static int lov_comp_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct lov_page *lp = cl2lov_page(slice);
-
- return (*printer)(env, cookie,
- LUSTRE_LOV_NAME"-page@%p\n", lp);
-}
-
-static const struct cl_page_operations lov_comp_page_ops = {
- .cpo_print = lov_comp_page_print
-};
-
int lov_page_init_composite(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index)
{
struct cl_object *subobj;
struct cl_object *o;
struct lov_io_sub *sub;
- struct lov_page *lpg = cl_object_page_slice(obj, page);
struct lov_layout_raid0 *r0;
loff_t offset;
loff_t suboff;
offset, entry, stripe, suboff);
page->cp_lov_index = lov_comp_index(entry, stripe);
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_comp_page_ops);
+ LASSERT(page->cp_lov_index != CP_LOV_INDEX_EMPTY);
if (!stripe_cached) {
sub = lov_sub_get(env, lio, page->cp_lov_index);
RETURN(rc);
}
-static int lov_empty_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct lov_page *lp = cl2lov_page(slice);
-
- return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p, empty.\n", lp);
-}
-
-static const struct cl_page_operations lov_empty_page_ops = {
- .cpo_print = lov_empty_page_print
-};
-
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index)
{
- struct lov_page *lpg = cl_object_page_slice(obj, page);
void *addr;
ENTRY;
+ BUILD_BUG_ON(!__same_type(page->cp_lov_index, CP_LOV_INDEX_EMPTY));
+ page->cp_lov_index = CP_LOV_INDEX_EMPTY;
- page->cp_lov_index = ~0;
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
addr = kmap(page->cp_vmpage);
memset(addr, 0, cl_page_size(obj));
kunmap(page->cp_vmpage);
- cl_page_export(env, page, 1);
+ SetPageUptodate(page->cp_vmpage);
RETURN(0);
}
RETURN(-ENODATA);
}
-bool lov_page_is_empty(const struct cl_page *page)
-{
- const struct cl_page_slice *slice = cl_page_at(page, &lov_device_type);
-
- LASSERT(slice != NULL);
- return slice->cpl_ops == &lov_empty_page_ops;
-}
-
-
/** @} lov */
-
extern unsigned short cl_page_kmem_size_array[16];
struct cl_thread_info *cl_env_info(const struct lu_env *env);
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
+void __cl_page_disown(const struct lu_env *env, struct cl_page *pg);
#endif /* _CL_INTERNAL_H */
}
EXPORT_SYMBOL(cl_io_fini);
-static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj)
+static int __cl_io_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj)
{
struct cl_object *scan;
int result;
{
LASSERT(obj != cl_object_top(obj));
- return cl_io_init0(env, io, iot, obj);
+ return __cl_io_init(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);
/* clear I/O restart from previous instance */
io->ci_need_restart = 0;
- return cl_io_init0(env, io, iot, obj);
+ return __cl_io_init(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);
struct cl_page_list *plist, struct cl_page *page)
{
LASSERT(plist->pl_nr > 0);
- LASSERT(cl_page_is_vmlocked(env, page));
ENTRY;
list_del_init(&page->cp_batch);
/**
* Disowns pages in a queue.
*/
-void cl_page_list_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
+void cl_page_list_disown(const struct lu_env *env, struct cl_page_list *plist)
{
struct cl_page *page;
struct cl_page *temp;
-
ENTRY;
cl_page_list_for_each_safe(page, temp, plist) {
LASSERT(plist->pl_nr > 0);
list_del_init(&page->cp_batch);
--plist->pl_nr;
/*
- * cl_page_disown0 rather than usual cl_page_disown() is used,
+ * __cl_page_disown rather than usual cl_page_disown() is used,
* because pages are possibly in CPS_FREEING state already due
* to the call to cl_page_list_discard().
*/
/*
- * XXX cl_page_disown0() will fail if page is not locked.
+ * XXX __cl_page_disown() will fail if page is not locked.
*/
- cl_page_disown0(env, io, page);
+ __cl_page_disown(env, page);
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
plist);
cl_page_put(env, page);
EXPORT_SYMBOL(cl_2queue_init);
/**
- * Add a page to the incoming page list of 2-queue.
- */
-void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page, bool get_ref)
-{
- cl_page_list_add(&queue->c2_qin, page, get_ref);
-}
-EXPORT_SYMBOL(cl_2queue_add);
-
-/**
* Disown pages in both lists of a 2-queue.
*/
-void cl_2queue_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue)
+void cl_2queue_disown(const struct lu_env *env, struct cl_2queue *queue)
{
ENTRY;
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_page_list_disown(env, io, &queue->c2_qout);
+ cl_page_list_disown(env, &queue->c2_qin);
+ cl_page_list_disown(env, &queue->c2_qout);
EXIT;
}
EXPORT_SYMBOL(cl_2queue_disown);
{
ENTRY;
cl_2queue_init(queue);
- cl_2queue_add(queue, page, true);
+ /*
+ * Add a page to the incoming page list of 2-queue.
+ */
+ cl_page_list_add(&queue->c2_qin, page, true);
EXIT;
}
EXPORT_SYMBOL(cl_2queue_init_page);
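With cl_2queue_add() removed, callers queue pages on the incoming list directly; the conversion is a single line (sketch only, variable names are placeholders):

	/* was: cl_2queue_add(queue, page, true); */
	cl_page_list_add(&queue->c2_qin, page, true);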
#include <cl_object.h>
#include "cl_internal.h"
-static void cl_lock_trace0(int level, const struct lu_env *env,
- const char *prefix, const struct cl_lock *lock,
- const char *func, const int line)
+static void __cl_lock_trace(int level, const struct lu_env *env,
+ const char *prefix, const struct cl_lock *lock,
+ const char *func, const int line)
{
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
CDEBUG(level, "%s: %p (%p/%d) at %s():%d\n",
prefix, lock, env, h->coh_nesting, func, line);
}
#define cl_lock_trace(level, env, prefix, lock) \
- cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
+ __cl_lock_trace(level, env, prefix, lock, __FUNCTION__, __LINE__)
/**
* Adds lock slice to the compound lock.
#include <cl_object.h>
#include "cl_internal.h"
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
+static void __cl_page_delete(const struct lu_env *env, struct cl_page *pg);
static DEFINE_MUTEX(cl_page_kmem_mutex);
#ifdef LIBCFS_DEBUG
*/
static void cl_page_get_trust(struct cl_page *page)
{
- LASSERT(atomic_read(&page->cp_ref) > 0);
- atomic_inc(&page->cp_ref);
+ LASSERT(refcount_read(&page->cp_ref) > 0);
+ refcount_inc(&page->cp_ref);
}
static struct cl_page_slice *
slice = cl_page_slice_get(cl_page, i); i >= 0; \
slice = cl_page_slice_get(cl_page, --i))
-/**
- * Returns a slice within a cl_page, corresponding to the given layer in the
- * device stack.
- *
- * \see cl_lock_at()
- */
-static const struct cl_page_slice *
-cl_page_at_trusted(const struct cl_page *cl_page,
- const struct lu_device_type *dtype)
-{
- const struct cl_page_slice *slice;
- int i;
-
- ENTRY;
-
- cl_page_slice_for_each(cl_page, slice, i) {
- if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
- RETURN(slice);
- }
-
- RETURN(NULL);
-}
-
static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
{
int index = cl_page->cp_kmem_index;
}
}
-static void cl_page_free(const struct lu_env *env, struct cl_page *cl_page,
+static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
struct pagevec *pvec)
{
- struct cl_object *obj = cl_page->cp_obj;
+ struct cl_object *obj = cp->cp_obj;
unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
- struct cl_page_slice *slice;
- int i;
+ struct page *vmpage;
ENTRY;
- PASSERT(env, cl_page, list_empty(&cl_page->cp_batch));
- PASSERT(env, cl_page, cl_page->cp_owner == NULL);
- PASSERT(env, cl_page, cl_page->cp_state == CPS_FREEING);
-
- cl_page_slice_for_each(cl_page, slice, i) {
- if (unlikely(slice->cpl_ops->cpo_fini != NULL))
- slice->cpl_ops->cpo_fini(env, slice, pvec);
+ PASSERT(env, cp, list_empty(&cp->cp_batch));
+ PASSERT(env, cp, cp->cp_owner == NULL);
+ PASSERT(env, cp, cp->cp_state == CPS_FREEING);
+
+ if (cp->cp_type == CPT_CACHEABLE) {
+ /* vmpage->private was already cleared when page was
+ * moved into CPS_FREEING state. */
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT((struct cl_page *)vmpage->private != cp);
+
+ if (pvec != NULL) {
+ if (!pagevec_add(pvec, vmpage))
+ pagevec_release(pvec);
+ } else {
+ put_page(vmpage);
+ }
}
- cl_page->cp_layer_count = 0;
+
+ cp->cp_layer_count = 0;
cs_page_dec(obj, CS_total);
- cs_pagestate_dec(obj, cl_page->cp_state);
- lu_object_ref_del_at(&obj->co_lu, &cl_page->cp_obj_ref,
- "cl_page", cl_page);
- if (cl_page->cp_type != CPT_TRANSIENT)
+ cs_pagestate_dec(obj, cp->cp_state);
+ lu_object_ref_del_at(&obj->co_lu, &cp->cp_obj_ref, "cl_page", cp);
+ if (cp->cp_type != CPT_TRANSIENT)
cl_object_put(env, obj);
- lu_ref_fini(&cl_page->cp_reference);
- __cl_page_free(cl_page, bufsize);
+ lu_ref_fini(&cp->cp_reference);
+ __cl_page_free(cp, bufsize);
EXIT;
}
*/
BUILD_BUG_ON((1 << CP_STATE_BITS) < CPS_NR); /* cp_state */
BUILD_BUG_ON((1 << CP_TYPE_BITS) < CPT_NR); /* cp_type */
- atomic_set(&cl_page->cp_ref, 1);
+ refcount_set(&cl_page->cp_ref, 1);
cl_page->cp_obj = o;
if (type != CPT_TRANSIENT)
cl_object_get(o);
result = o->co_ops->coo_page_init(env, o,
cl_page, ind);
if (result != 0) {
- cl_page_delete0(env, cl_page);
+ __cl_page_delete(env, cl_page);
cl_page_free(env, cl_page, NULL);
cl_page = ERR_PTR(result);
break;
return cl_page_in_use_noref(pg);
}
-static void cl_page_state_set0(const struct lu_env *env,
- struct cl_page *cl_page,
- enum cl_page_state state)
+static void __cl_page_state_set(const struct lu_env *env,
+ struct cl_page *cl_page,
+ enum cl_page_state state)
{
enum cl_page_state old;
static void cl_page_state_set(const struct lu_env *env,
struct cl_page *page, enum cl_page_state state)
{
- cl_page_state_set0(env, page, state);
+ __cl_page_state_set(env, page, state);
}
/**
{
ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
- atomic_read(&page->cp_ref));
+ refcount_read(&page->cp_ref));
- if (atomic_dec_and_test(&page->cp_ref)) {
+ if (refcount_dec_and_test(&page->cp_ref)) {
LASSERT(page->cp_state == CPS_FREEING);
- LASSERT(atomic_read(&page->cp_ref) == 0);
+ LASSERT(refcount_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
PASSERT(env, page, list_empty(&page->cp_batch));
/*
}
EXPORT_SYMBOL(cl_vmpage_page);
-const struct cl_page_slice *cl_page_at(const struct cl_page *page,
- const struct lu_device_type *dtype)
-{
- return cl_page_at_trusted(page, dtype);
-}
-EXPORT_SYMBOL(cl_page_at);
-
static void cl_page_owner_clear(struct cl_page *page)
{
ENTRY;
EXIT;
}
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *cl_page)
+void __cl_page_disown(const struct lu_env *env, struct cl_page *cp)
{
- const struct cl_page_slice *slice;
+ struct page *vmpage;
enum cl_page_state state;
- int i;
- ENTRY;
- state = cl_page->cp_state;
- PINVRNT(env, cl_page, state == CPS_OWNED ||
- state == CPS_FREEING);
- PINVRNT(env, cl_page, cl_page_invariant(cl_page) ||
- state == CPS_FREEING);
- cl_page_owner_clear(cl_page);
+ ENTRY;
+ state = cp->cp_state;
+ PINVRNT(env, cp, state == CPS_OWNED || state == CPS_FREEING);
+ PINVRNT(env, cp, cl_page_invariant(cp) || state == CPS_FREEING);
+ cl_page_owner_clear(cp);
if (state == CPS_OWNED)
- cl_page_state_set(env, cl_page, CPS_CACHED);
- /*
- * Completion call-backs are executed in the bottom-up order, so that
- * uppermost layer (llite), responsible for VFS/VM interaction runs
- * last and can release locks safely.
- */
- cl_page_slice_for_each_reverse(cl_page, slice, i) {
- if (slice->cpl_ops->cpo_disown != NULL)
- (*slice->cpl_ops->cpo_disown)(env, slice, io);
+ cl_page_state_set(env, cp, CPS_CACHED);
+
+ if (cp->cp_type == CPT_CACHEABLE) {
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ unlock_page(vmpage);
}
EXIT;
* or, page was owned by another thread, or in IO.
*
* \see cl_page_disown()
- * \see cl_page_operations::cpo_own()
* \see cl_page_own_try()
* \see cl_page_own
*/
-static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
- struct cl_page *cl_page, int nonblock)
+static int __cl_page_own(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *cl_page, int nonblock)
{
- const struct cl_page_slice *slice;
- int result = 0;
- int i;
+ struct page *vmpage = cl_page->cp_vmpage;
+ int result;
ENTRY;
PINVRNT(env, cl_page, !cl_page_is_owned(cl_page, io));
- io = cl_io_top(io);
if (cl_page->cp_state == CPS_FREEING) {
result = -ENOENT;
goto out;
}
- cl_page_slice_for_each(cl_page, slice, i) {
- if (slice->cpl_ops->cpo_own)
- result = (*slice->cpl_ops->cpo_own)(env, slice,
- io, nonblock);
- if (result != 0)
- break;
- }
- if (result > 0)
- result = 0;
+ LASSERT(vmpage != NULL);
- if (result == 0) {
- PASSERT(env, cl_page, cl_page->cp_owner == NULL);
- cl_page->cp_owner = cl_io_top(io);
- cl_page_owner_set(cl_page);
- if (cl_page->cp_state != CPS_FREEING) {
- cl_page_state_set(env, cl_page, CPS_OWNED);
- } else {
- cl_page_disown0(env, io, cl_page);
- result = -ENOENT;
+ if (cl_page->cp_type == CPT_TRANSIENT) {
+ /* OK */
+ } else if (nonblock) {
+ if (!trylock_page(vmpage)) {
+ result = -EAGAIN;
+ goto out;
}
+
+ if (unlikely(PageWriteback(vmpage))) {
+ unlock_page(vmpage);
+ result = -EAGAIN;
+ goto out;
+ }
+ } else {
+ lock_page(vmpage);
+ wait_on_page_writeback(vmpage);
+ }
+
+ PASSERT(env, cl_page, cl_page->cp_owner == NULL);
+ cl_page->cp_owner = cl_io_top(io);
+ cl_page_owner_set(cl_page);
+
+ if (cl_page->cp_state == CPS_FREEING) {
+ __cl_page_disown(env, cl_page);
+ result = -ENOENT;
+ goto out;
}
+ cl_page_state_set(env, cl_page, CPS_OWNED);
+ result = 0;
out:
PINVRNT(env, cl_page, ergo(result == 0,
cl_page_invariant(cl_page)));
/**
* Own a page, might be blocked.
*
- * \see cl_page_own0()
+ * \see __cl_page_own()
*/
int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
{
- return cl_page_own0(env, io, pg, 0);
+ return __cl_page_own(env, io, pg, 0);
}
EXPORT_SYMBOL(cl_page_own);
/**
* Nonblock version of cl_page_own().
*
- * \see cl_page_own0()
+ * \see __cl_page_own()
*/
int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg)
{
- return cl_page_own0(env, io, pg, 1);
+ return __cl_page_own(env, io, pg, 1);
}
EXPORT_SYMBOL(cl_page_own_try);
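A short, hedged sketch of the ownership cycle these helpers implement now that the per-layer cpo_own/cpo_disown methods are folded into cl_page.c (the helper name is hypothetical, error handling trimmed):

	/* Hypothetical helper: take exclusive ownership of a page, operate on
	 * it, then release it. */
	static int example_with_owned_page(const struct lu_env *env,
					   struct cl_io *io, struct cl_page *pg)
	{
		int rc;

		rc = cl_page_own(env, io, pg);	/* locks the vmpage for
						 * CPT_CACHEABLE pages */
		if (rc != 0)
			return rc;		/* -ENOENT: page is being freed */

		/* ... work on the page while it is in CPS_OWNED ... */

		cl_page_disown(env, io, pg);	/* unlocks the vmpage */
		return 0;
	}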
*
* Called when page is already locked by the hosting VM.
*
- * \pre !cl_page_is_owned(cl_page, io)
- * \post cl_page_is_owned(cl_page, io)
- *
- * \see cl_page_operations::cpo_assume()
+ * \pre !cl_page_is_owned(cp, io)
+ * \post cl_page_is_owned(cp, io)
*/
void cl_page_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *cl_page)
+ struct cl_io *io, struct cl_page *cp)
{
- const struct cl_page_slice *slice;
- int i;
+ struct page *vmpage;
ENTRY;
+ PINVRNT(env, cp, cl_object_same(cp->cp_obj, io->ci_obj));
- PINVRNT(env, cl_page,
- cl_object_same(cl_page->cp_obj, io->ci_obj));
- io = cl_io_top(io);
-
- cl_page_slice_for_each(cl_page, slice, i) {
- if (slice->cpl_ops->cpo_assume != NULL)
- (*slice->cpl_ops->cpo_assume)(env, slice, io);
+ if (cp->cp_type == CPT_CACHEABLE) {
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ wait_on_page_writeback(vmpage);
}
- PASSERT(env, cl_page, cl_page->cp_owner == NULL);
- cl_page->cp_owner = cl_io_top(io);
- cl_page_owner_set(cl_page);
- cl_page_state_set(env, cl_page, CPS_OWNED);
+ PASSERT(env, cp, cp->cp_owner == NULL);
+ cp->cp_owner = cl_io_top(io);
+ cl_page_owner_set(cp);
+ cl_page_state_set(env, cp, CPS_OWNED);
EXIT;
}
EXPORT_SYMBOL(cl_page_assume);
* Moves cl_page into cl_page_state::CPS_CACHED without releasing a lock
* on the underlying VM page (as VM is supposed to do this itself).
*
- * \pre cl_page_is_owned(cl_page, io)
- * \post !cl_page_is_owned(cl_page, io)
- *
- * \see cl_page_assume()
+ * \pre cl_page_is_owned(cp, io)
+ * \post !cl_page_is_owned(cp, io)
*/
void cl_page_unassume(const struct lu_env *env,
- struct cl_io *io, struct cl_page *cl_page)
+ struct cl_io *io, struct cl_page *cp)
{
- const struct cl_page_slice *slice;
- int i;
+ struct page *vmpage;
- ENTRY;
- PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
- PINVRNT(env, cl_page, cl_page_invariant(cl_page));
+ ENTRY;
+ PINVRNT(env, cp, cl_page_is_owned(cp, io));
+ PINVRNT(env, cp, cl_page_invariant(cp));
- io = cl_io_top(io);
- cl_page_owner_clear(cl_page);
- cl_page_state_set(env, cl_page, CPS_CACHED);
+ cl_page_owner_clear(cp);
+ cl_page_state_set(env, cp, CPS_CACHED);
- cl_page_slice_for_each_reverse(cl_page, slice, i) {
- if (slice->cpl_ops->cpo_unassume != NULL)
- (*slice->cpl_ops->cpo_unassume)(env, slice, io);
+ if (cp->cp_type == CPT_CACHEABLE) {
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
}
EXIT;
* \post !cl_page_is_owned(pg, io)
*
* \see cl_page_own()
- * \see cl_page_operations::cpo_disown()
*/
void cl_page_disown(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg)
PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
pg->cp_state == CPS_FREEING);
- ENTRY;
- io = cl_io_top(io);
- cl_page_disown0(env, io, pg);
- EXIT;
+ __cl_page_disown(env, pg);
}
EXPORT_SYMBOL(cl_page_disown);
* \see cl_page_operations::cpo_discard()
*/
void cl_page_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_page *cl_page)
+ struct cl_io *io, struct cl_page *cp)
{
+ struct page *vmpage;
const struct cl_page_slice *slice;
int i;
- PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
- PINVRNT(env, cl_page, cl_page_invariant(cl_page));
+ PINVRNT(env, cp, cl_page_is_owned(cp, io));
+ PINVRNT(env, cp, cl_page_invariant(cp));
- cl_page_slice_for_each(cl_page, slice, i) {
+ cl_page_slice_for_each(cp, slice, i) {
if (slice->cpl_ops->cpo_discard != NULL)
(*slice->cpl_ops->cpo_discard)(env, slice, io);
}
+
+ if (cp->cp_type == CPT_CACHEABLE) {
+ vmpage = cp->cp_vmpage;
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ generic_error_remove_page(vmpage->mapping, vmpage);
+ } else {
+ cl_page_delete(env, cp);
+ }
}
EXPORT_SYMBOL(cl_page_discard);
/**
* Version of cl_page_delete() that can be called for not fully constructed
- * cl_pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
+ * cl_pages, e.g. in an error handling cl_page_find()->__cl_page_delete()
* path. Doesn't check cl_page invariant.
*/
-static void cl_page_delete0(const struct lu_env *env,
- struct cl_page *cl_page)
+static void __cl_page_delete(const struct lu_env *env, struct cl_page *cp)
{
const struct cl_page_slice *slice;
int i;
- ENTRY;
-
- PASSERT(env, cl_page, cl_page->cp_state != CPS_FREEING);
+ ENTRY;
+ PASSERT(env, cp, cp->cp_state != CPS_FREEING);
/*
* Severe all ways to obtain new pointers to @pg.
*/
- cl_page_owner_clear(cl_page);
- cl_page_state_set0(env, cl_page, CPS_FREEING);
+ cl_page_owner_clear(cp);
+ __cl_page_state_set(env, cp, CPS_FREEING);
- cl_page_slice_for_each_reverse(cl_page, slice, i) {
+ cl_page_slice_for_each_reverse(cp, slice, i) {
if (slice->cpl_ops->cpo_delete != NULL)
(*slice->cpl_ops->cpo_delete)(env, slice);
}
{
PINVRNT(env, pg, cl_page_invariant(pg));
ENTRY;
- cl_page_delete0(env, pg);
+ __cl_page_delete(env, pg);
EXIT;
}
EXPORT_SYMBOL(cl_page_delete);
-/**
- * Marks page up-to-date.
- *
- * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the \a uptodate argument.
- *
- * \see cl_page_operations::cpo_export()
- */
-void cl_page_export(const struct lu_env *env, struct cl_page *cl_page,
- int uptodate)
-{
- const struct cl_page_slice *slice;
- int i;
-
- PINVRNT(env, cl_page, cl_page_invariant(cl_page));
-
- cl_page_slice_for_each(cl_page, slice, i) {
- if (slice->cpl_ops->cpo_export != NULL)
- (*slice->cpl_ops->cpo_export)(env, slice, uptodate);
- }
-}
-EXPORT_SYMBOL(cl_page_export);
-
-/**
- * Returns true, if \a page is VM locked in a suitable sense by the calling
- * thread.
- */
-int cl_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page *cl_page)
-{
- const struct cl_page_slice *slice;
- int result;
-
- ENTRY;
- slice = cl_page_slice_get(cl_page, 0);
- PASSERT(env, cl_page, slice->cpl_ops->cpo_is_vmlocked != NULL);
- /*
- * Call ->cpo_is_vmlocked() directly instead of going through
- * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
- * cl_page_invariant().
- */
- result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
- PASSERT(env, cl_page, result == -EBUSY || result == -ENODATA);
-
- RETURN(result == -EBUSY);
-}
-EXPORT_SYMBOL(cl_page_is_vmlocked);
-
void cl_page_touch(const struct lu_env *env,
const struct cl_page *cl_page, size_t to)
{
}
/**
- * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
- * called top-to-bottom. Every layer either agrees to submit this page (by
- * returning 0), or requests to omit this page (by returning -EALREADY). Layer
- * handling interactions with the VM also has to inform VM that page is under
- * transfer now.
+ * Prepares a page for immediate transfer. Returns -EALREADY if this page
+ * should be omitted from the transfer.
*/
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
- struct cl_page *cl_page, enum cl_req_type crt)
+ struct cl_page *cp, enum cl_req_type crt)
{
- const struct cl_page_slice *slice;
- int result = 0;
- int i;
-
- PINVRNT(env, cl_page, cl_page_is_owned(cl_page, io));
- PINVRNT(env, cl_page, cl_page_invariant(cl_page));
- PINVRNT(env, cl_page, crt < CRT_NR);
+ struct page *vmpage = cp->cp_vmpage;
+ int rc;
+
+ PASSERT(env, cp, crt < CRT_NR);
+ PINVRNT(env, cp, cl_page_is_owned(cp, io));
+ PINVRNT(env, cp, cl_page_invariant(cp));
+
+ if (cp->cp_type == CPT_TRANSIENT) {
+ /* Nothing to do. */
+ } else if (crt == CRT_READ) {
+ if (PageUptodate(vmpage))
+ GOTO(out, rc = -EALREADY);
+ } else {
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageDirty(vmpage));
- /*
- * this has to be called bottom-to-top, so that llite can set up
- * PG_writeback without risking other layers deciding to skip this
- * page.
- */
- if (crt >= CRT_NR)
- return -EINVAL;
-
- if (cl_page->cp_type != CPT_TRANSIENT) {
- cl_page_slice_for_each(cl_page, slice, i) {
- if (slice->cpl_ops->cpo_own)
- result =
- (*slice->cpl_ops->io[crt].cpo_prep)(env,
- slice,
- io);
- if (result != 0)
- break;
- }
+ /* ll_writepage path is not a sync write, so need to
+ * set page writeback flag
+ */
+ if (cp->cp_sync_io == NULL)
+ set_page_writeback(vmpage);
}
- if (result >= 0) {
- result = 0;
- cl_page_io_start(env, cl_page, crt);
- }
+ cl_page_io_start(env, cp, crt);
+ rc = 0;
+out:
+ CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
- CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
- return result;
+ return rc;
}
EXPORT_SYMBOL(cl_page_prep);
*
* \pre cl_page->cp_state == CPS_CACHED
* \post cl_page->cp_state == CPS_PAGEIN || cl_page->cp_state == CPS_PAGEOUT
- *
- * \see cl_page_operations::cpo_make_ready()
*/
-int cl_page_make_ready(const struct lu_env *env, struct cl_page *cl_page,
- enum cl_req_type crt)
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *cp,
+ enum cl_req_type crt)
{
- const struct cl_page_slice *slice;
- int result = 0;
- int i;
+ struct page *vmpage = cp->cp_vmpage;
+ int rc;
- ENTRY;
- PINVRNT(env, cl_page, crt < CRT_NR);
- if (crt >= CRT_NR)
- RETURN(-EINVAL);
+ ENTRY;
+ PASSERT(env, cp, crt == CRT_WRITE);
- cl_page_slice_for_each(cl_page, slice, i) {
- if (slice->cpl_ops->io[crt].cpo_make_ready != NULL)
- result = (*slice->cpl_ops->io[crt].cpo_make_ready)(env, slice);
- if (result != 0)
- break;
+ if (cp->cp_type == CPT_TRANSIENT)
+ GOTO(out, rc = 0);
+
+ lock_page(vmpage);
+
+ if (clear_page_dirty_for_io(vmpage)) {
+ LASSERT(cp->cp_state == CPS_CACHED);
+ /* This actually clears the dirty bit in the
+ * radix tree.
+ */
+ set_page_writeback(vmpage);
+ CL_PAGE_HEADER(D_PAGE, env, cp, "readied\n");
+ rc = 0;
+ } else if (cp->cp_state == CPS_PAGEOUT) {
+ /* is it possible for osc_flush_async_page()
+ * to already make it ready?
+ */
+ rc = -EALREADY;
+ } else {
+ CL_PAGE_DEBUG(D_ERROR, env, cp,
+ "unexpecting page state %d\n",
+ cp->cp_state);
+ LBUG();
}
- if (result >= 0) {
- result = 0;
- PASSERT(env, cl_page, cl_page->cp_state == CPS_CACHED);
- cl_page_io_start(env, cl_page, crt);
- }
- CL_PAGE_HEADER(D_TRACE, env, cl_page, "%d %d\n", crt, result);
+ unlock_page(vmpage);
+out:
+ if (rc == 0) {
+ PASSERT(env, cp, cp->cp_state == CPS_CACHED);
+ cl_page_io_start(env, cp, crt);
+ }
- RETURN(result);
+ CL_PAGE_HEADER(D_TRACE, env, cp, "%d %d\n", crt, rc);
+
+ RETURN(rc);
}
EXPORT_SYMBOL(cl_page_make_ready);
{
(*printer)(env, cookie,
"page@%p[%d %p %d %d %p]\n",
- pg, atomic_read(&pg->cp_ref), pg->cp_obj,
+ pg, refcount_read(&pg->cp_ref), pg->cp_obj,
pg->cp_state, pg->cp_type,
pg->cp_owner);
}
* Prints human readable representation of \a cl_page to the \a f.
*/
void cl_page_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_page *cl_page)
+ lu_printer_t printer, const struct cl_page *cp)
{
+ struct page *vmpage = cp->cp_vmpage;
const struct cl_page_slice *slice;
int result = 0;
int i;
- cl_page_header_print(env, cookie, printer, cl_page);
- cl_page_slice_for_each(cl_page, slice, i) {
+ cl_page_header_print(env, cookie, printer, cp);
+
+ (*printer)(env, cookie, "vmpage @%p", vmpage);
+
+ if (vmpage != NULL) {
+ (*printer)(env, cookie, " %lx %d:%d %lx %lu %slru",
+ (long)vmpage->flags, page_count(vmpage),
+ page_mapcount(vmpage), vmpage->private,
+ page_index(vmpage),
+ list_empty(&vmpage->lru) ? "not-" : "");
+ }
+
+ (*printer)(env, cookie, "\n");
+
+ cl_page_slice_for_each(cp, slice, i) {
if (slice->cpl_ops->cpo_print != NULL)
result = (*slice->cpl_ops->cpo_print)(env, slice,
- cookie, printer);
+ cookie, printer);
if (result != 0)
break;
}
- (*printer)(env, cookie, "end page@%p\n", cl_page);
+
+ (*printer)(env, cookie, "end page@%p\n", cp);
}
EXPORT_SYMBOL(cl_page_print);
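
In-tree callers normally reach cl_page_print() through the CL_PAGE_DEBUG() macro with lu_cdebug_printer(); the callback below is only a sketch of a lu_printer_t-compatible printer (vprintk() is used purely for illustration, and sample_* names are assumptions).

/* Sketch of a lu_printer_t-compatible callback. */
static int sample_printer(const struct lu_env *env, void *cookie,
			  const char *format, ...)
{
	va_list args;

	va_start(args, format);
	vprintk(format, args);
	va_end(args);
	return 0;
}

static void sample_dump_page(const struct lu_env *env, struct cl_page *page)
{
	/* walks cl_page_header_print(), the vmpage summary and every
	 * layer's ->cpo_print() method */
	cl_page_print(env, NULL, sample_printer, page);
}
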
LASSERT(cl_page->cp_layer_count < CP_MAX_LAYER);
LASSERT(offset < (1 << sizeof(cl_page->cp_layer_offset[0]) * 8));
cl_page->cp_layer_offset[cl_page->cp_layer_count++] = offset;
- slice->cpl_obj = obj;
slice->cpl_ops = ops;
slice->cpl_page = cl_page;
RETURN(NULL);
/* Initialize cache data */
- atomic_set(&cache->ccc_users, 1);
+ refcount_set(&cache->ccc_users, 1);
cache->ccc_lru_max = lru_page_max;
atomic_long_set(&cache->ccc_lru_left, lru_page_max);
spin_lock_init(&cache->ccc_lru_lock);
*/
void cl_cache_incref(struct cl_client_cache *cache)
{
- atomic_inc(&cache->ccc_users);
+ refcount_inc(&cache->ccc_users);
}
EXPORT_SYMBOL(cl_cache_incref);
*/
void cl_cache_decref(struct cl_client_cache *cache)
{
- if (atomic_dec_and_test(&cache->ccc_users))
+ if (refcount_dec_and_test(&cache->ccc_users))
OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(cl_cache_decref);
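
For the atomic_t to refcount_t conversions in this patch (cp_ref, ccc_users), the sketch below shows the generic init/get/put pattern and what the refcount API adds over a bare atomic (saturation on overflow, warnings on increment-from-zero); the structure and names are hypothetical, not Lustre code.

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object mirroring the cl_client_cache life cycle. */
struct sample_cache {
	refcount_t	sc_users;
};

static void sample_cache_init(struct sample_cache *sc)
{
	refcount_set(&sc->sc_users, 1);		/* creator holds a reference */
}

static void sample_cache_get(struct sample_cache *sc)
{
	refcount_inc(&sc->sc_users);		/* WARNs if it was already 0 */
}

static void sample_cache_put(struct sample_cache *sc)
{
	if (refcount_dec_and_test(&sc->sc_users))
		kfree(sc);			/* last reference frees it */
}
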
struct echo_device *eo_dev;
struct list_head eo_obj_chain;
struct lov_oinfo *eo_oinfo;
- atomic_t eo_npages;
int eo_deleted;
};
struct lov_oinfo **eoc_oinfo;
};
-struct echo_page {
- struct cl_page_slice ep_cl;
- unsigned long ep_lock;
-};
-
-struct echo_lock {
- struct cl_lock_slice el_cl;
- struct list_head el_chain;
- struct echo_object *el_object;
- __u64 el_cookie;
- atomic_t el_refcount;
-};
-
#ifdef HAVE_SERVER_SUPPORT
static const char echo_md_root_dir_name[] = "ROOT_ECHO";
/** \defgroup echo_helpers Helper functions
* @{
*/
-static inline struct echo_device *cl2echo_dev(const struct cl_device *dev)
+static struct echo_device *cl2echo_dev(const struct cl_device *dev)
{
return container_of_safe(dev, struct echo_device, ed_cl);
}
-static inline struct cl_device *echo_dev2cl(struct echo_device *d)
+static struct cl_device *echo_dev2cl(struct echo_device *d)
{
return &d->ed_cl;
}
-static inline struct echo_device *obd2echo_dev(const struct obd_device *obd)
+static struct echo_device *obd2echo_dev(const struct obd_device *obd)
{
return cl2echo_dev(lu2cl_dev(obd->obd_lu_dev));
}
-static inline struct cl_object *echo_obj2cl(struct echo_object *eco)
+static struct cl_object *echo_obj2cl(struct echo_object *eco)
{
return &eco->eo_cl;
}
-static inline struct echo_object *cl2echo_obj(const struct cl_object *o)
+static struct echo_object *cl2echo_obj(const struct cl_object *o)
{
return container_of(o, struct echo_object, eo_cl);
}
-static inline struct echo_page *cl2echo_page(const struct cl_page_slice *s)
-{
- return container_of(s, struct echo_page, ep_cl);
-}
-
-static inline struct echo_lock *cl2echo_lock(const struct cl_lock_slice *s)
-{
- return container_of(s, struct echo_lock, el_cl);
-}
-
-static inline struct cl_lock *echo_lock2cl(const struct echo_lock *ecl)
-{
- return ecl->el_cl.cls_lock;
-}
-
static struct lu_context_key echo_thread_key;
-static inline struct echo_thread_info *echo_env_info(const struct lu_env *env)
+static struct echo_thread_info *echo_env_info(const struct lu_env *env)
{
struct echo_thread_info *info;
return info;
}
-static inline
-struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
+static struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
{
return container_of(c, struct echo_object_conf, eoc_cl);
}
#ifdef HAVE_SERVER_SUPPORT
-static inline struct echo_md_device *lu2emd_dev(struct lu_device *d)
+static struct echo_md_device *lu2emd_dev(struct lu_device *d)
{
return container_of_safe(d, struct echo_md_device,
emd_md_dev.md_lu_dev);
}
-static inline struct lu_device *emd2lu_dev(struct echo_md_device *d)
+static struct lu_device *emd2lu_dev(struct echo_md_device *d)
{
return &d->emd_md_dev.md_lu_dev;
}
-static inline struct seq_server_site *echo_md_seq_site(struct echo_md_device *d)
+static struct seq_server_site *echo_md_seq_site(struct echo_md_device *d)
{
return emd2lu_dev(d)->ld_site->ld_seq_site;
}
-static inline struct obd_device *emd2obd_dev(struct echo_md_device *d)
+static struct obd_device *emd2obd_dev(struct echo_md_device *d)
{
return d->emd_md_dev.md_lu_dev.ld_obd;
}
/** @} echo_helpers */
static int cl_echo_object_put(struct echo_object *eco);
-static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
- struct page **pages, int npages, int async);
struct echo_thread_info {
struct echo_object_conf eti_conf;
struct lustre_md eti_md;
- struct cl_2queue eti_queue;
- struct cl_io eti_io;
- struct cl_lock eti_lock;
struct lu_fid eti_fid;
struct lu_fid eti_fid2;
#ifdef HAVE_SERVER_SUPPORT
unsigned long dummy;
};
-static struct kmem_cache *echo_lock_kmem;
static struct kmem_cache *echo_object_kmem;
static struct kmem_cache *echo_thread_kmem;
static struct kmem_cache *echo_session_kmem;
-/* static struct kmem_cache *echo_req_kmem; */
static struct lu_kmem_descr echo_caches[] = {
{
- .ckd_cache = &echo_lock_kmem,
- .ckd_name = "echo_lock_kmem",
- .ckd_size = sizeof(struct echo_lock)
- },
- {
.ckd_cache = &echo_object_kmem,
.ckd_name = "echo_object_kmem",
.ckd_size = sizeof(struct echo_object)
}
};
-/** \defgroup echo_page Page operations
- *
- * Echo page operations.
- *
- * @{
- */
-static int echo_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, int nonblock)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- if (!nonblock) {
- if (test_and_set_bit(0, &ep->ep_lock))
- return -EAGAIN;
- } else {
- while (test_and_set_bit(0, &ep->ep_lock))
- wait_on_bit(&ep->ep_lock, 0, TASK_UNINTERRUPTIBLE);
- }
- return 0;
-}
-
-static void echo_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- LASSERT(test_bit(0, &ep->ep_lock));
- clear_and_wake_up_bit(0, &ep->ep_lock);
-}
-
-static void echo_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- cl_page_delete(env, slice->cpl_page);
-}
-
-static int echo_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- if (test_bit(0, &cl2echo_page(slice)->ep_lock))
- return -EBUSY;
- return -ENODATA;
-}
-
-static void echo_page_completion(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- LASSERT(slice->cpl_page->cp_sync_io != NULL);
-}
-
-static void echo_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice,
- struct pagevec *pvec)
-{
- struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
-
- ENTRY;
- atomic_dec(&eco->eo_npages);
- put_page(slice->cpl_page->cp_vmpage);
- EXIT;
-}
-
-static int echo_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- return 0;
-}
-
-static int echo_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
-{
- struct echo_page *ep = cl2echo_page(slice);
-
- (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
- ep, test_bit(0, &ep->ep_lock),
- slice->cpl_page->cp_vmpage);
- return 0;
-}
-
-static const struct cl_page_operations echo_page_ops = {
- .cpo_own = echo_page_own,
- .cpo_disown = echo_page_disown,
- .cpo_discard = echo_page_discard,
- .cpo_fini = echo_page_fini,
- .cpo_print = echo_page_print,
- .cpo_is_vmlocked = echo_page_is_vmlocked,
- .io = {
- [CRT_READ] = {
- .cpo_prep = echo_page_prep,
- .cpo_completion = echo_page_completion,
- },
- [CRT_WRITE] = {
- .cpo_prep = echo_page_prep,
- .cpo_completion = echo_page_completion,
- }
- }
-};
-
-/** @} echo_page */
-
-/** \defgroup echo_lock Locking
- *
- * echo lock operations
- *
- * @{
- */
-static void echo_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct echo_lock *ecl = cl2echo_lock(slice);
-
- LASSERT(list_empty(&ecl->el_chain));
- OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
-}
-
-static const struct cl_lock_operations echo_lock_ops = {
- .clo_fini = echo_lock_fini,
-};
-
-/** @} echo_lock */
-
-/** \defgroup echo_cl_ops cl_object operations
- *
- * operations for cl_object
- *
- * @{
- */
-static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, pgoff_t index)
-{
- struct echo_page *ep = cl_object_page_slice(obj, page);
- struct echo_object *eco = cl2echo_obj(obj);
-
- ENTRY;
- get_page(page->cp_vmpage);
- /*
- * ep_lock is similar to the lock_page() lock, and
- * cannot usefully be monitored by lockdep.
- * So just use a bit in an "unsigned long" and use the
- * wait_on_bit() interface to wait for the bit to be clear.
- */
- ep->ep_lock = 0;
- cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
- atomic_inc(&eco->eo_npages);
- RETURN(0);
-}
-
-static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
-{
- return 0;
-}
-
-static int echo_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
-{
- struct echo_lock *el;
-
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, GFP_NOFS);
- if (el) {
- cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
- el->el_object = cl2echo_obj(obj);
- INIT_LIST_HEAD(&el->el_chain);
- atomic_set(&el->el_refcount, 0);
- }
- RETURN(el ? 0 : -ENOMEM);
-}
-
-static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf)
-{
- return 0;
-}
-
-static const struct cl_object_operations echo_cl_obj_ops = {
- .coo_page_init = echo_page_init,
- .coo_lock_init = echo_lock_init,
- .coo_io_init = echo_io_init,
- .coo_conf_set = echo_conf_set
-};
-/** @} echo_cl_ops */
-
/** \defgroup echo_lu_ops lu_object operations
*
* operations for echo lu object.
}
eco->eo_dev = ed;
- atomic_set(&eco->eo_npages, 0);
- cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
+ cl_object_page_init(lu2cl(obj), 0);
spin_lock(&ec->ec_lock);
list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
ec = eco->eo_dev->ed_ec;
- LASSERT(atomic_read(&eco->eo_npages) == 0);
-
spin_lock(&ec->ec_lock);
list_del_init(&eco->eo_obj_chain);
spin_unlock(&ec->ec_lock);
lu_object_init(obj, &hdr->coh_lu, dev);
lu_object_add_top(&hdr->coh_lu, obj);
-
- eco->eo_cl.co_ops = &echo_cl_obj_ops;
obj->lo_ops = &echo_lu_obj_ops;
}
RETURN(obj);
return NULL;
}
-static void echo_lock_release(const struct lu_env *env,
- struct echo_lock *ecl,
- int still_used)
-{
- struct cl_lock *clk = echo_lock2cl(ecl);
-
- cl_lock_release(env, clk);
-}
-
static struct lu_device *echo_device_free(const struct lu_env *env,
struct lu_device *d)
{
RETURN(0);
}
-static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
- u64 start, u64 end, int mode,
- __u64 *cookie, __u32 enqflags)
-{
- struct cl_io *io;
- struct cl_lock *lck;
- struct cl_object *obj;
- struct cl_lock_descr *descr;
- struct echo_thread_info *info;
- int rc = -ENOMEM;
-
- ENTRY;
- info = echo_env_info(env);
- io = &info->eti_io;
- lck = &info->eti_lock;
- obj = echo_obj2cl(eco);
-
- memset(lck, 0, sizeof(*lck));
- descr = &lck->cll_descr;
- descr->cld_obj = obj;
- descr->cld_start = cl_index(obj, start);
- descr->cld_end = cl_index(obj, end);
- descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ;
- descr->cld_enq_flags = enqflags;
- io->ci_obj = obj;
-
- rc = cl_lock_request(env, io, lck);
- if (rc == 0) {
- struct echo_client_obd *ec = eco->eo_dev->ed_ec;
- struct echo_lock *el;
-
- el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- spin_lock(&ec->ec_lock);
- if (list_empty(&el->el_chain)) {
- list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- spin_unlock(&ec->ec_lock);
- }
- RETURN(rc);
-}
-
-static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
- __u64 cookie)
-{
- struct echo_client_obd *ec = ed->ed_ec;
- struct echo_lock *ecl = NULL;
- struct list_head *el;
- int found = 0, still_used = 0;
-
- ENTRY;
- LASSERT(ec != NULL);
- spin_lock(&ec->ec_lock);
- list_for_each(el, &ec->ec_locks) {
- ecl = list_entry(el, struct echo_lock, el_chain);
- CDEBUG(D_INFO, "ecl: %p, cookie: %#llx\n", ecl, ecl->el_cookie);
- found = (ecl->el_cookie == cookie);
- if (found) {
- if (atomic_dec_and_test(&ecl->el_refcount))
- list_del_init(&ecl->el_chain);
- else
- still_used = 1;
- break;
- }
- }
- spin_unlock(&ec->ec_lock);
-
- if (!found)
- RETURN(-ENOENT);
-
- echo_lock_release(env, ecl, still_used);
- RETURN(0);
-}
-
-static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct pagevec *pvec)
-{
- struct echo_thread_info *info;
- struct cl_2queue *queue;
- int i = 0;
-
- info = echo_env_info(env);
- LASSERT(io == &info->eti_io);
-
- queue = &info->eti_queue;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *vmpage = pvec->pages[i];
- struct cl_page *page = (struct cl_page *)vmpage->private;
-
- cl_page_list_add(&queue->c2_qout, page, true);
- }
-}
-
-static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
- struct page **pages, int npages, int async)
-{
- struct lu_env *env;
- struct echo_thread_info *info;
- struct cl_object *obj = echo_obj2cl(eco);
- struct echo_device *ed = eco->eo_dev;
- struct cl_2queue *queue;
- struct cl_io *io;
- struct cl_page *clp;
- struct lustre_handle lh = { 0 };
- int page_size = cl_page_size(obj);
- int rc;
- int i;
- __u16 refcheck;
-
- ENTRY;
- LASSERT((offset & ~PAGE_MASK) == 0);
- LASSERT(ed->ed_next != NULL);
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
-
- info = echo_env_info(env);
- io = &info->eti_io;
- queue = &info->eti_queue;
-
- cl_2queue_init(queue);
-
- io->ci_ignore_layout = 1;
- rc = cl_io_init(env, io, CIT_MISC, obj);
- if (rc < 0)
- GOTO(out, rc);
- LASSERT(rc == 0);
-
- rc = cl_echo_enqueue0(env, eco, offset,
- offset + npages * PAGE_SIZE - 1,
- rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
- CEF_NEVER);
- if (rc < 0)
- GOTO(error_lock, rc);
-
- for (i = 0; i < npages; i++) {
- LASSERT(pages[i]);
- clp = cl_page_find(env, obj, cl_index(obj, offset),
- pages[i], CPT_TRANSIENT);
- if (IS_ERR(clp)) {
- rc = PTR_ERR(clp);
- break;
- }
- LASSERT(clp->cp_type == CPT_TRANSIENT);
-
- rc = cl_page_own(env, io, clp);
- if (rc) {
- LASSERT(clp->cp_state == CPS_FREEING);
- cl_page_put(env, clp);
- break;
- }
-
- cl_2queue_add(queue, clp, true);
-
- /*
- * drop the reference count for cl_page_find, so that the page
- * will be freed in cl_2queue_fini.
- */
- cl_page_put(env, clp);
- cl_page_clip(env, clp, 0, page_size);
-
- offset += page_size;
- }
-
- if (rc == 0) {
- enum cl_req_type typ = rw == READ ? CRT_READ : CRT_WRITE;
-
- async = async && (typ == CRT_WRITE);
- if (async)
- rc = cl_io_commit_async(env, io, &queue->c2_qin,
- 0, PAGE_SIZE,
- echo_commit_callback);
- else
- rc = cl_io_submit_sync(env, io, typ, queue, 0);
- CDEBUG(D_INFO, "echo_client %s write returns %d\n",
- async ? "async" : "sync", rc);
- }
-
- cl_echo_cancel0(env, ed, lh.cookie);
- EXIT;
-error_lock:
- cl_2queue_discard(env, io, queue);
- cl_2queue_disown(env, io, queue);
- cl_2queue_fini(env, queue);
- cl_io_fini(env, io);
-out:
- cl_env_put(env, &refcheck);
- return rc;
-}
/** @} echo_exports */
static u64 last_object_id;
#ifdef HAVE_SERVER_SUPPORT
-static inline void echo_md_build_name(struct lu_name *lname, char *name,
+static void echo_md_build_name(struct lu_name *lname, char *name,
__u64 id)
{
snprintf(name, ETI_NAME_LEN, "%llu", id);
return rc;
}
-static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
- struct echo_object *eco, u64 offset,
- u64 count, int async)
-{
- size_t npages;
- struct brw_page *pga;
- struct brw_page *pgp;
- struct page **pages;
- u64 off;
- size_t i;
- int rc;
- int verify;
- gfp_t gfp_mask;
- u32 brw_flags = 0;
-
- ENTRY;
- verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
- (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
- (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
-
- gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER;
-
- LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
-
- if ((count & (~PAGE_MASK)) != 0)
- RETURN(-EINVAL);
-
- /* XXX think again with misaligned I/O */
- npages = count >> PAGE_SHIFT;
-
- if (rw == OBD_BRW_WRITE)
- brw_flags = OBD_BRW_ASYNC;
-
- OBD_ALLOC_PTR_ARRAY_LARGE(pga, npages);
- if (!pga)
- RETURN(-ENOMEM);
-
- OBD_ALLOC_PTR_ARRAY_LARGE(pages, npages);
- if (!pages) {
- OBD_FREE_PTR_ARRAY_LARGE(pga, npages);
- RETURN(-ENOMEM);
- }
-
- for (i = 0, pgp = pga, off = offset;
- i < npages;
- i++, pgp++, off += PAGE_SIZE) {
-
- LASSERT(pgp->pg == NULL); /* for cleanup */
-
- rc = -ENOMEM;
- pgp->pg = alloc_page(gfp_mask);
- if (!pgp->pg)
- goto out;
-
- /* set mapping so page is not considered encrypted */
- pgp->pg->mapping = ECHO_MAPPING_UNENCRYPTED;
- pages[i] = pgp->pg;
- pgp->count = PAGE_SIZE;
- pgp->off = off;
- pgp->flag = brw_flags;
-
- if (verify)
- echo_client_page_debug_setup(pgp->pg, rw,
- ostid_id(&oa->o_oi), off,
- pgp->count);
- }
-
- /* brw mode can only be used at client */
- LASSERT(ed->ed_next != NULL);
- rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
-
- out:
- if (rc != 0 || rw != OBD_BRW_READ)
- verify = 0;
-
- for (i = 0, pgp = pga; i < npages; i++, pgp++) {
- if (!pgp->pg)
- continue;
-
- if (verify) {
- int vrc;
-
- vrc = echo_client_page_debug_check(pgp->pg,
- ostid_id(&oa->o_oi),
- pgp->off,
- pgp->count);
- if (vrc != 0 && rc == 0)
- rc = vrc;
- }
- __free_page(pgp->pg);
- }
- OBD_FREE_PTR_ARRAY_LARGE(pga, npages);
- OBD_FREE_PTR_ARRAY_LARGE(pages, npages);
- RETURN(rc);
-}
-
static int echo_client_prep_commit(const struct lu_env *env,
struct obd_export *exp, int rw,
struct obdo *oa, struct echo_object *eco,
data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE;
switch (test_mode) {
- case 1:
- fallthrough;
- case 2:
- rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset,
- data->ioc_count, async);
- break;
case 3:
rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco,
data->ioc_offset, data->ioc_count,
{
struct osc_page *opg = oap2osc_page(oap);
pgoff_t index = osc_index(oap2osc(oap));
- struct cl_object *obj;
+ struct cl_object *obj = osc2cl(osc_page_object(opg));
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
int result;
loff_t kms;
/* readpage queues with _COUNT_STABLE, shouldn't get here. */
LASSERT(!(cmd & OBD_BRW_READ));
LASSERT(opg != NULL);
- obj = opg->ops_cl.cpl_obj;
cl_object_attr_lock(obj);
result = cl_object_attr_get(env, obj, attr);
/* statistic */
if (rc == 0 && srvlock) {
- struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
+ struct lu_device *ld = osc_page_object(opg)->oo_cl.co_lu.lo_dev;
struct osc_stats *stats = &lu2osc_dev(ld)->osc_stats;
size_t bytes = oap->oap_count;
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
struct cl_page *page, loff_t offset)
{
- struct obd_export *exp = osc_export(osc);
struct osc_async_page *oap = &ops->ops_oap;
struct page *vmpage = page->cp_vmpage;
ENTRY;
return cfs_size_round(sizeof(*oap));
oap->oap_magic = OAP_MAGIC;
- oap->oap_cli = &exp->exp_obd->u.cli;
oap->oap_obj = osc;
oap->oap_page = vmpage;
struct osc_io *oio = osc_env_io(env);
struct osc_extent *ext = NULL;
struct osc_async_page *oap = &ops->ops_oap;
- struct client_obd *cli = oap->oap_cli;
struct osc_object *osc = oap->oap_obj;
+ struct client_obd *cli = osc_cli(osc);
struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
pgoff_t index;
unsigned int tmp;
struct osc_page *ops)
{
struct osc_extent *ext = NULL;
- struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
+ struct osc_object *obj = osc_page_object(ops);
struct cl_page *cp = ops->ops_cl.cpl_page;
pgoff_t index = osc_index(ops);
struct osc_async_page *oap = &ops->ops_oap;
struct osc_page *ops = pvec[i];
struct cl_page *page = ops->ops_cl.cpl_page;
- if (cl_page_is_vmlocked(env, page) ||
+ if (PageLocked(page->cp_vmpage) ||
PageDirty(page->cp_vmpage) ||
PageWriteback(page->cp_vmpage))
return false;
static void osc_page_transfer_add(const struct lu_env *env,
struct osc_page *opg, enum cl_req_type crt)
{
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
+ struct osc_object *obj = osc_page_object(opg);
osc_lru_use(osc_cli(obj), opg);
}
{
struct osc_page *opg = cl2osc_page(slice);
struct osc_async_page *oap = &opg->ops_oap;
- struct osc_object *obj = cl2osc(slice->cpl_obj);
+ struct osc_object *obj = osc_page_object(opg);
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p %lu: "
/* 2 */
oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
oap->oap_async_flags, oap->oap_brw_flags,
- oap->oap_request, oap->oap_cli, obj,
+ oap->oap_request, cli, obj,
/* 3 */
opg->ops_transfer_pinned,
osc_submit_duration(opg), opg->ops_srvlock,
const struct cl_page_slice *slice)
{
struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
+ struct osc_object *obj = osc_page_object(opg);
int rc;
ENTRY;
const struct cl_page_slice *slice, size_t to)
{
struct osc_page *opg = cl2osc_page(slice);
- struct cl_object *obj = opg->ops_cl.cpl_obj;
+ struct cl_object *obj = osc2cl(osc_page_object(opg));
osc_page_touch_at(env, obj, osc_index(opg), to);
}
unsigned long budget;
LASSERT(cache != NULL);
- budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
+ budget = cache->ccc_lru_max / (refcount_read(&cache->ccc_users) - 2);
	/* if it's about to run out of LRU slots, we should free some, but not
	 * too many, to maintain fairness among OSCs. */
cache->ccc_lru_shrinkers++;
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- max_scans = atomic_read(&cache->ccc_users) - 2;
+ max_scans = refcount_read(&cache->ccc_users) - 2;
while (--max_scans > 0 &&
(scan = list_first_entry_or_null(&cache->ccc_lru,
struct client_obd,
/* lru cleanup */
if (cli->cl_cache != NULL) {
- LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
+ LASSERT(refcount_read(&cli->cl_cache->ccc_users) > 0);
spin_lock(&cli->cl_cache->ccc_lru_lock);
list_del_init(&cli->cl_lru_osc);
spin_unlock(&cli->cl_cache->ccc_lru_lock);