LU-9575 obdclass: remove cl_for_each defines 85/27385/2
author Ben Evans <bevans@cray.com>
Thu, 1 Jun 2017 20:14:00 +0000 (15:14 -0500)
committer Oleg Drokin <oleg.drokin@intel.com>
Wed, 7 Jun 2017 20:32:08 +0000 (20:32 +0000)
cl_io_for_each and cl_io_for_each_reverse are simply aliases
for list_for_each_entry and list_for_each_entry_reverse.
There is no point to them, so get rid of them and
eliminate any confusion.

Signed-off-by: Ben Evans <bevans@cray.com>
Change-Id: I4f21cc5020142c82999324f0ae7ccb57704a98bd
Reviewed-on: https://review.whamcloud.com/27385
Tested-by: Jenkins
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
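
For reference, the two removed defines were one-line wrappers around the
plain list iterators, so every caller now open-codes the walk over
io->ci_layers. Both fragments below are lifted from the diff that follows;
cl_io_lock() is used as the example caller:

/* removed from lustre/obdclass/cl_io.c: thin aliases only */
#define cl_io_for_each(slice, io) \
	list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
	list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)

/* equivalent open-coded iteration, as now used in cl_io_lock() */
list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
	if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
		continue;
	result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
	if (result != 0)
		break;
}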
lustre/obdclass/cl_io.c

index 12535f4..fc22b2c 100644
--- a/lustre/obdclass/cl_io.c
+++ b/lustre/obdclass/cl_io.c
  *
  */
 
-#define cl_io_for_each(slice, io) \
-       list_for_each_entry((slice), &io->ci_layers, cis_linkage)
-#define cl_io_for_each_reverse(slice, io)                 \
-       list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
-
 static inline int cl_io_type_is_valid(enum cl_io_type type)
 {
         return CIT_READ <= type && type < CIT_OP_NR;
@@ -357,13 +352,13 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io)
         LINVRNT(cl_io_invariant(io));
 
         ENTRY;
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
-                        continue;
-                result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
-                if (result != 0)
-                        break;
-        }
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
+                       continue;
+               result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
+               if (result != 0)
+                       break;
+       }
         if (result == 0) {
                 cl_io_locks_sort(io);
                 result = cl_lockset_lock(env, io, &io->ci_lockset);
@@ -406,7 +401,7 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
                        link->cill_fini(env, link);
        }
 
-       cl_io_for_each_reverse(scan, io) {
+       list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
                        scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
        }
@@ -433,14 +428,14 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
 
         ENTRY;
         result = 0;
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
-                        continue;
-                result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
-                                                                      scan);
-                if (result != 0)
-                        break;
-        }
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
+                       continue;
+               result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
+                                                                     scan);
+               if (result != 0)
+                       break;
+       }
         if (result == 0)
                 io->ci_state = CIS_IT_STARTED;
         RETURN(result);
@@ -461,10 +456,10 @@ void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
         LINVRNT(cl_io_invariant(io));
 
         ENTRY;
-        cl_io_for_each_reverse(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
-                        scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
-        }
+       list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
+                       scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
+       }
         io->ci_state = CIS_IT_ENDED;
         EXIT;
 }
@@ -488,11 +483,11 @@ void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
        io->u.ci_rw.rw_range.cir_count -= nob;
 
         /* layers have to be notified. */
-        cl_io_for_each_reverse(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
-                        scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
-                                                                   nob);
-        }
+       list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
+                       scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
+                                                                  nob);
+       }
         EXIT;
 }
 
@@ -559,13 +554,13 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io)
         ENTRY;
 
         io->ci_state = CIS_IO_GOING;
-        cl_io_for_each(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
-                        continue;
-                result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
-                if (result != 0)
-                        break;
-        }
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
+                       continue;
+               result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
+               if (result != 0)
+                       break;
+       }
         if (result >= 0)
                 result = 0;
         RETURN(result);
@@ -585,11 +580,11 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
         LINVRNT(cl_io_invariant(io));
         ENTRY;
 
-        cl_io_for_each_reverse(scan, io) {
-                if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
-                        scan->cis_iop->op[io->ci_type].cio_end(env, scan);
-                /* TODO: error handling. */
-        }
+       list_for_each_entry_reverse(scan, &io->ci_layers, cis_linkage) {
+               if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
+                       scan->cis_iop->op[io->ci_type].cio_end(env, scan);
+               /* TODO: error handling. */
+       }
         io->ci_state = CIS_IO_FINISHED;
         EXIT;
 }
@@ -611,7 +606,7 @@ int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
        LINVRNT(cl_io_invariant(io));
        ENTRY;
 
-       cl_io_for_each(scan, io) {
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_read_ahead == NULL)
                        continue;
 
@@ -637,7 +632,7 @@ int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
        int result = 0;
        ENTRY;
 
-       cl_io_for_each(scan, io) {
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_commit_async == NULL)
                        continue;
                result = scan->cis_iop->cio_commit_async(env, scan, queue,
@@ -666,7 +661,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
        int result = 0;
        ENTRY;
 
-       cl_io_for_each(scan, io) {
+       list_for_each_entry(scan, &io->ci_layers, cis_linkage) {
                if (scan->cis_iop->cio_submit == NULL)
                        continue;
                result = scan->cis_iop->cio_submit(env, scan, crt, queue);