}
EXPORT_SYMBOL(cl_page_at);
-#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
-
-#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
-({ \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- int __result; \
- ptrdiff_t __op = (_op); \
- int (*__method)_proto; \
- \
- __result = 0; \
- list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method != NULL) { \
- __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- if (__result > 0) \
- __result = 0; \
- __result; \
-})
-
-#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method != NULL) \
- (*__method)(__env, __scan, ## __VA_ARGS__); \
- } \
-} while (0)
-
-#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
-do { \
- const struct lu_env *__env = (_env); \
- struct cl_page *__page = (_page); \
- const struct cl_page_slice *__scan; \
- ptrdiff_t __op = (_op); \
- void (*__method)_proto; \
- \
- /* get to the bottom page. */ \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + __op); \
- if (__method != NULL) \
- (*__method)(__env, __scan, ## __VA_ARGS__); \
- } \
-} while (0)
-
-static int cl_page_invoke(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page, ptrdiff_t op)
-
-{
- PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- ENTRY;
- RETURN(CL_PAGE_INVOKE(env, page, op,
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io));
-}
-
-static void cl_page_invoid(const struct lu_env *env,
- struct cl_io *io, struct cl_page *page, ptrdiff_t op)
-
-{
- PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- ENTRY;
- CL_PAGE_INVOID(env, page, op,
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *), io);
- EXIT;
-}
-
static void cl_page_owner_clear(struct cl_page *page)
{
ENTRY;
void cl_page_disown0(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg)
{
+ const struct cl_page_slice *slice;
enum cl_page_state state;
ENTRY;
* uppermost layer (llite), responsible for VFS/VM interaction runs
* last and can release locks safely.
*/
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
+ list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_disown != NULL)
+ (*slice->cpl_ops->cpo_disown)(env, slice, io);
+ }
+
EXIT;
}
static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg, int nonblock)
{
- int result;
+ const struct cl_page_slice *slice;
+ int result = 0;
PINVRNT(env, pg, !cl_page_is_owned(pg, io));
if (pg->cp_state == CPS_FREEING) {
result = -ENOENT;
- } else {
- result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
- (const struct lu_env *,
- const struct cl_page_slice *,
- struct cl_io *, int),
- io, nonblock);
- if (result == 0) {
- PASSERT(env, pg, pg->cp_owner == NULL);
- pg->cp_owner = cl_io_top(io);;
- cl_page_owner_set(pg);
- if (pg->cp_state != CPS_FREEING) {
- cl_page_state_set(env, pg, CPS_OWNED);
- } else {
- cl_page_disown0(env, io, pg);
- result = -ENOENT;
- }
- }
- }
+ goto out;
+ }
+
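+ /* ask each layer, top to bottom, to take ownership of the page */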
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_own != NULL)
+ result = (*slice->cpl_ops->cpo_own)(env, slice,
+ io, nonblock);
+ if (result != 0)
+ break;
+ }
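+ /* a non-zero return ends the scan; positive values count as success */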
+ if (result > 0)
+ result = 0;
+
+ if (result == 0) {
+ PASSERT(env, pg, pg->cp_owner == NULL);
+ pg->cp_owner = cl_io_top(io);
+ cl_page_owner_set(pg);
+ if (pg->cp_state != CPS_FREEING) {
+ cl_page_state_set(env, pg, CPS_OWNED);
+ } else {
+ cl_page_disown0(env, io, pg);
+ result = -ENOENT;
+ }
+ }
+
+out:
PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
RETURN(result);
}
void cl_page_assume(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg)
{
+ const struct cl_page_slice *slice;
+
PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
ENTRY;
io = cl_io_top(io);
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_assume != NULL)
+ (*slice->cpl_ops->cpo_assume)(env, slice, io);
+ }
+
PASSERT(env, pg, pg->cp_owner == NULL);
pg->cp_owner = cl_io_top(io);
cl_page_owner_set(pg);
void cl_page_unassume(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg)
{
+ const struct cl_page_slice *slice;
+
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
io = cl_io_top(io);
cl_page_owner_clear(pg);
cl_page_state_set(env, pg, CPS_CACHED);
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
+
+ list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_unassume != NULL)
+ (*slice->cpl_ops->cpo_unassume)(env, slice, io);
+ }
+
EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);
void cl_page_discard(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg)
{
+ const struct cl_page_slice *slice;
+
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_discard != NULL)
+ (*slice->cpl_ops->cpo_discard)(env, slice, io);
+ }
}
EXPORT_SYMBOL(cl_page_discard);
*/
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
+ const struct cl_page_slice *slice;
+
ENTRY;
PASSERT(env, pg, pg->cp_state != CPS_FREEING);
* Severe all ways to obtain new pointers to @pg.
*/
cl_page_owner_clear(pg);
-
cl_page_state_set0(env, pg, CPS_FREEING);
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
- (const struct lu_env *, const struct cl_page_slice *));
+ list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_delete != NULL)
+ (*slice->cpl_ops->cpo_delete)(env, slice);
+ }
EXIT;
}
*/
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
{
+ const struct cl_page_slice *slice;
+
PINVRNT(env, pg, cl_page_invariant(pg));
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
- (const struct lu_env *,
- const struct cl_page_slice *, int), uptodate);
+
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_export != NULL)
+ (*slice->cpl_ops->cpo_export)(env, slice, uptodate);
+ }
}
EXPORT_SYMBOL(cl_page_export);
*/
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
{
- int result;
const struct cl_page_slice *slice;
+ int result;
ENTRY;
slice = container_of(pg->cp_layers.next,
int cl_page_prep(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg, enum cl_req_type crt)
{
- int result;
+ const struct cl_page_slice *slice;
+ int result = 0;
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
*/
if (crt >= CRT_NR)
return -EINVAL;
- result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
- if (result == 0)
+
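+ /* crt selects the read or write slot in each layer's io[] ops */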
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->io[crt].cpo_prep != NULL)
+ result = (*slice->cpl_ops->io[crt].cpo_prep)(env,
+ slice,
+ io);
+ if (result != 0)
+ break;
+ }
+
+ if (result >= 0) {
+ result = 0;
cl_page_io_start(env, pg, crt);
+ }
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
return result;
void cl_page_completion(const struct lu_env *env,
struct cl_page *pg, enum cl_req_type crt, int ioret)
{
+ const struct cl_page_slice *slice;
struct cl_sync_io *anchor = pg->cp_sync_io;
PASSERT(env, pg, crt < CRT_NR);
cl_page_state_set(env, pg, CPS_CACHED);
if (crt >= CRT_NR)
return;
- CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
- (const struct lu_env *,
- const struct cl_page_slice *, int), ioret);
+
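+ /* walk the layers bottom to top, as CL_PAGE_INVOID_REVERSE did */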
+ list_for_each_entry_reverse(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->io[crt].cpo_completion != NULL)
+ (*slice->cpl_ops->io[crt].cpo_completion)(env, slice,
+ ioret);
+ }
+
if (anchor != NULL) {
LASSERT(pg->cp_sync_io == anchor);
pg->cp_sync_io = NULL;
int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
enum cl_req_type crt)
{
- int result;
+ const struct cl_page_slice *slice;
+ int result = 0;
PINVRNT(env, pg, crt < CRT_NR);
ENTRY;
if (crt >= CRT_NR)
RETURN(-EINVAL);
- result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
- (const struct lu_env *,
- const struct cl_page_slice *));
- if (result == 0) {
+
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->io[crt].cpo_make_ready != NULL)
+ result = (*slice->cpl_ops->io[crt].cpo_make_ready)(env,
+ slice);
+ if (result != 0)
+ break;
+ }
+
+ if (result >= 0) {
+ result = 0;
PASSERT(env, pg, pg->cp_state == CPS_CACHED);
cl_page_io_start(env, pg, crt);
}
int cl_page_flush(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg)
{
- int result;
+ const struct cl_page_slice *slice;
+ int result = 0;
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
ENTRY;
- result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_flush != NULL)
+ result = (*slice->cpl_ops->cpo_flush)(env, slice, io);
+ if (result != 0)
+ break;
+ }
+ if (result > 0)
+ result = 0;
CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
RETURN(result);
void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
int from, int to)
{
+ const struct cl_page_slice *slice;
+
PINVRNT(env, pg, cl_page_invariant(pg));
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
- (const struct lu_env *,
- const struct cl_page_slice *,int, int),
- from, to);
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_clip != NULL)
+ (*slice->cpl_ops->cpo_clip)(env, slice, from, to);
+ }
}
EXPORT_SYMBOL(cl_page_clip);
void cl_page_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
+ const struct cl_page_slice *slice;
+ int result = 0;
+
cl_page_header_print(env, cookie, printer, pg);
- CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
- (const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t p), cookie, printer);
+ list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_print != NULL)
+ result = (*slice->cpl_ops->cpo_print)(env, slice,
+ cookie, printer);
+ if (result != 0)
+ break;
+ }
(*printer)(env, cookie, "end page@%p\n", pg);
}
EXPORT_SYMBOL(cl_page_print);
*/
int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
{
- return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
- (const struct lu_env *,
- const struct cl_page_slice *));
+ const struct cl_page_slice *slice;
+ int result = 0;
+
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_cancel != NULL)
+ result = (*slice->cpl_ops->cpo_cancel)(env, slice);
+ if (result != 0)
+ break;
+ }
+ if (result > 0)
+ result = 0;
+
+ return result;
}
/**