cl_io_for_each and cl_io_for_each_reverse are simply aliases
for list_for_each_entry and list_for_each_entry_reverse.
There is no point to them, so just get rid of them and
eliminate any confusion.
Signed-off-by: Ben Evans <bevans@cray.com>
Change-Id: I4f21cc5020142c82999324f0ae7ccb57704a98bd
Reviewed-on: https://review.whamcloud.com/27385
Tested-by: Jenkins
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
-#define cl_io_for_each(slice, io) \
- list_for_each_entry((slice), &io->ci_layers, cis_linkage)
-#define cl_io_for_each_reverse(slice, io) \
- list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
-
static inline int cl_io_type_is_valid(enum cl_io_type type)
{
return CIT_READ <= type && type < CIT_OP_NR;
static inline int cl_io_type_is_valid(enum cl_io_type type)
{
return CIT_READ <= type && type < CIT_OP_NR;
LINVRNT(cl_io_invariant(io));
ENTRY;
LINVRNT(cl_io_invariant(io));
ENTRY;
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
- if (result != 0)
- break;
- }
+ list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+ if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
+ continue;
+ result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
+ if (result != 0)
+ break;
+ }
if (result == 0) {
cl_io_locks_sort(io);
result = cl_lockset_lock(env, io, &io->ci_lockset);
if (result == 0) {
cl_io_locks_sort(io);
result = cl_lockset_lock(env, io, &io->ci_lockset);
link->cill_fini(env, link);
}
link->cill_fini(env, link);
}
- cl_io_for_each_reverse(scan, io) {
+ list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
}
if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
}
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
- scan);
- if (result != 0)
- break;
- }
+ list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+ if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
+ continue;
+ result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
+ scan);
+ if (result != 0)
+ break;
+ }
if (result == 0)
io->ci_state = CIS_IT_STARTED;
RETURN(result);
if (result == 0)
io->ci_state = CIS_IT_STARTED;
RETURN(result);
LINVRNT(cl_io_invariant(io));
ENTRY;
LINVRNT(cl_io_invariant(io));
ENTRY;
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
- scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
- }
+ list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+ if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
+ scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
+ }
io->ci_state = CIS_IT_ENDED;
EXIT;
}
io->ci_state = CIS_IT_ENDED;
EXIT;
}
io->u.ci_rw.rw_range.cir_count -= nob;
/* layers have to be notified. */
io->u.ci_rw.rw_range.cir_count -= nob;
/* layers have to be notified. */
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
- scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
- nob);
- }
+ list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+ if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
+ scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
+ nob);
+ }
ENTRY;
io->ci_state = CIS_IO_GOING;
ENTRY;
io->ci_state = CIS_IO_GOING;
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
- continue;
- result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
- if (result != 0)
- break;
- }
+ list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+ if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
+ continue;
+ result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
+ if (result != 0)
+ break;
+ }
if (result >= 0)
result = 0;
RETURN(result);
if (result >= 0)
result = 0;
RETURN(result);
LINVRNT(cl_io_invariant(io));
ENTRY;
LINVRNT(cl_io_invariant(io));
ENTRY;
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
- scan->cis_iop->op[io->ci_type].cio_end(env, scan);
- /* TODO: error handling. */
- }
+ list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+ if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
+ scan->cis_iop->op[io->ci_type].cio_end(env, scan);
+ /* TODO: error handling. */
+ }
io->ci_state = CIS_IO_FINISHED;
EXIT;
}
io->ci_state = CIS_IO_FINISHED;
EXIT;
}
LINVRNT(cl_io_invariant(io));
ENTRY;
LINVRNT(cl_io_invariant(io));
ENTRY;
- cl_io_for_each(scan, io) {
+ list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
if (scan->cis_iop->cio_read_ahead == NULL)
continue;
if (scan->cis_iop->cio_read_ahead == NULL)
continue;
- cl_io_for_each(scan, io) {
+ list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
if (scan->cis_iop->cio_commit_async == NULL)
continue;
result = scan->cis_iop->cio_commit_async(env, scan, queue,
if (scan->cis_iop->cio_commit_async == NULL)
continue;
result = scan->cis_iop->cio_commit_async(env, scan, queue,
- cl_io_for_each(scan, io) {
+ list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
if (scan->cis_iop->cio_submit == NULL)
continue;
result = scan->cis_iop->cio_submit(env, scan, crt, queue);
if (scan->cis_iop->cio_submit == NULL)
continue;
result = scan->cis_iop->cio_submit(env, scan, crt, queue);