LU-1346 libcfs: cleanup macros in kp30.h 76/4776/5
author James Simmons <uja.ornl@gmail.com>
Mon, 15 Jul 2013 21:39:09 +0000 (17:39 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
Sat, 10 Aug 2013 04:50:05 +0000 (04:50 +0000)
Some macro cleanup in kp30.h

Some other changes:
. removes some unused/duplicated macros such as
our_recalc_sigpending, prepare_work, cfs_get_work_data, strtok,
work_struct_t, SEM_COUNT, PORTAL_SYMBOL_REGISTER/UNREGISTER,
cfs_num_present_cpus
. moves some kernel-version compatibility macros/inlines to
  portals_compat25.h
. no longer needs to differentiate CONFIG_SMP for LASSERT_SPIN_LOCKED
. get/put_cpu are available for both SMP and !SMP

This patch does not change LWT (light-weight trace) related code.
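
For illustration, a minimal before/after sketch of the call-site conversion
these cleanups imply. Only the macro and helper names come from this patch;
the surrounding function, the do_percpu_work() helper, and the lock argument
are hypothetical, and the snippet assumes the usual libcfs/kernel context:

/* Before: call sites go through the libcfs wrappers defined in kp30.h */
static void demo_old(spinlock_t *lock)
{
	unsigned int cpu = cfs_get_cpu();	/* libcfs wrapper, removed here */

	LASSERT_SPIN_LOCKED(lock);		/* expanded to spin_is_locked() on SMP */
	do_percpu_work(cpu, cfs_num_online_cpus());	/* do_percpu_work() is hypothetical */
	cfs_put_cpu();
}

/* After: the plain kernel primitives are used directly */
static void demo_new(spinlock_t *lock)
{
	unsigned int cpu = get_cpu();

	LASSERT(spin_is_locked(lock));
	do_percpu_work(cpu, num_online_cpus());
	put_cpu();
}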

Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: Iaa3cda88d539f3d7e677b7accda99b476daeddca
Reviewed-on: http://review.whamcloud.com/4776
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Tested-by: Hudson
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Yang Sheng <yang.sheng@intel.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
43 files changed:
contrib/scripts/libcfs_cleanup.sed
libcfs/include/libcfs/darwin/kp30.h
libcfs/include/libcfs/linux/kp30.h
libcfs/include/libcfs/user-lock.h
libcfs/include/libcfs/user-prim.h
libcfs/include/libcfs/winnt/kp30.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/libcfs/darwin/darwin-tracefile.c
libcfs/libcfs/debug.c
libcfs/libcfs/linux/linux-tracefile.c
libcfs/libcfs/lwt.c
libcfs/libcfs/posix/posix-debug.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/tracefile.h
libcfs/libcfs/winnt/winnt-tcpip.c
libcfs/libcfs/winnt/winnt-tracefile.c
lnet/klnds/ptllnd/ptllnd.h
lnet/klnds/qswlnd/qswlnd.c
lnet/klnds/socklnd/socklnd_lib-winnt.h
lustre/fld/fld_index.c
lustre/include/lprocfs_status.h
lustre/include/lustre_dlm.h
lustre/include/lustre_net.h
lustre/ldlm/ldlm_resource.c
lustre/llite/dir.c
lustre/llite/llite_internal.h
lustre/lov/lov_merge.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_object.c
lustre/obdclass/cl_page.c
lustre/obdclass/lprocfs_status.c
lustre/ofd/ofd_fmd.c
lustre/ofd/ofd_grant.c
lustre/ofd/ofd_lvb.c
lustre/osc/osc_cache.c
lustre/osd-ldiskfs/osd_iam.c
lustre/ptlrpc/client.c
lustre/ptlrpc/gss/gss_pipefs.c
lustre/ptlrpc/import.c
lustre/ptlrpc/pinger.c
lustre/ptlrpc/ptlrpcd.c
lustre/ptlrpc/sec_bulk.c
lustre/ptlrpc/service.c

index 7aea56f..59fc449 100644 (file)
@@ -353,3 +353,29 @@ s/cfs_mem_cache_destroy/kmem_cache_destroy/g
 s/cfs_mem_cache_alloc/kmem_cache_alloc/g
 s/cfs_mem_cache_free/kmem_cache_free/g
 s/cfs_mem_is_in_cache/kmem_is_in_cache/g
+
+################################################################################
+# macros in kp30.h
+
+s/\bcfs_num_online_cpus\b/num_online_cpus/g
+/#[ \t]*define[ \t]*\bnum_online_cpus\b *( *)[ \t]*\bnum_online_cpus\b *( *)/d
+s/\bwait_on_page\b/wait_on_page_locked/g
+/#[ \t]*define[ \t]*\bwait_on_page_locked\b[ \t]*\bwait_on_page_locked\b/d
+s/^\([ \t]*\)LASSERT_SPIN_LOCKED\b *\((.*)\)/\1LASSERT(spin_is_locked\2)/g
+/#[ \t]*define[ \t]*\bLASSERT_SPIN_LOCKED\b/d
+s/^\([ \t]*\)LINVRNT_SPIN_LOCKED\b *\((.*)\)/\1LINVRNT(spin_is_locked\2)/g
+/#[ \t]*define[ \t]*\bLINVRNT_SPIN_LOCKED\b/d
+s/^\([ \t]*\)LASSERT_SEM_LOCKED\b *\((.*)\)/\1LASSERT(down_trylock\2 != 0)/g
+/#[ \t]*define[ \t]*\bLASSERT_SEM_LOCKED\b/d
+s/^\([ \t]*\)LASSERT_MUTEX_LOCKED\b *\((.*)\)/\1LASSERT(mutex_is_locked\2)/g
+/#[ \t]*define[ \t]*\bLASSERT_MUTEX_LOCKED\b/d
+s/\bLIBCFS_PANIC\b/panic/g
+/#[ \t]*define[ \t]*\bpanic\b *( *\w* *)[ \t]*\bpanic\b *( *\w* *)/d
+s/\bcfs_num_possible_cpus\b/num_possible_cpus/g
+/#[ \t]*define[ \t]*\bnum_possible_cpus\b *( *)[ \t]*\bnum_possible_cpus\b *( *)/d
+s/\bcfs_smp_processor_id\b/smp_processor_id/g
+/#[ \t]*define[ \t]*\bsmp_processor_id\b *( *)[ \t]*\bsmp_processor_id\b *( *)/d
+s/\bcfs_get_cpu\b/get_cpu/g
+/#[ \t]*define[ \t]*\bget_cpu\b *( *)[ \t]*\bget_cpu\b *( *)/d
+s/\bcfs_put_cpu\b/put_cpu/g
+/#[ \t]*define[ \t]*\bput_cpu\b *( *)[ \t]*\bput_cpu\b *( *)/d
index c23a9b7..27db508 100644 (file)
 #include <libcfs/darwin/darwin-prim.h>
 #include <lnet/lnet.h>
 
-#ifdef CONFIG_SMP
-#define LASSERT_SPIN_LOCKED(lock) do {} while(0) /* XXX */
-#else
-#define LASSERT_SPIN_LOCKED(lock) do {} while(0)
-#endif
-#define LASSERT_SEM_LOCKED(sem) do {} while(0) /* XXX */
-
-#define LIBCFS_PANIC(msg) panic(msg)
 #error libcfs_register_panic_notifier() missing
 #error libcfs_unregister_panic_notifier() missing
 
 /* --------------------------------------------------------------------- */
 
-#define PORTAL_SYMBOL_REGISTER(x)               cfs_symbol_register(#x, &x)
-#define PORTAL_SYMBOL_UNREGISTER(x)             cfs_symbol_unregister(#x)
-
 #define PORTAL_SYMBOL_GET(x)                    ((typeof(&x))cfs_symbol_get(#x))
 #define PORTAL_SYMBOL_PUT(x)                    cfs_symbol_put(#x)
 
index 4e2efba..2c4ecb5 100644 (file)
 
 #include <libcfs/linux/portals_compat25.h>
 
-#define cfs_get_work_data(type,field,data) container_of(data,type,field)
-
-#define cfs_num_online_cpus() num_online_cpus()
-#define wait_on_page wait_on_page_locked
-#define our_recalc_sigpending(current) recalc_sigpending()
-#define strtok(a,b) strpbrk(a, b)
-#define work_struct_t      struct work_struct
-
-#ifdef CONFIG_SMP
-#define LASSERT_SPIN_LOCKED(lock) LASSERT(spin_is_locked(lock))
-#define LINVRNT_SPIN_LOCKED(lock) LINVRNT(spin_is_locked(lock))
-#else
-#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#endif
-
-#define LASSERT_SEM_LOCKED(sem) LASSERT(down_trylock(sem) != 0)
-#define LASSERT_MUTEX_LOCKED(x) LASSERT(mutex_is_locked(x))
-
-#define LIBCFS_PANIC(msg)            panic(msg)
-
 /* ------------------------------------------------------------------- */
 
-#define PORTAL_SYMBOL_REGISTER(x)
-#define PORTAL_SYMBOL_UNREGISTER(x)
-
 #define PORTAL_SYMBOL_GET(x) symbol_get(x)
 #define PORTAL_SYMBOL_PUT(x) symbol_put(x)
 
 # define printf(format, b...) CDEBUG(D_OTHER, format , ## b)
 # define time(a) CURRENT_TIME
 
-# define cfs_num_possible_cpus() num_possible_cpus()
-# define cfs_num_present_cpus()  num_present_cpus()
-
 /******************************************************************************/
 /* Light-weight trace
  * Support for temporary event tracing with minimal Heisenberg effect. */
@@ -304,21 +277,6 @@ extern int  lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
 
 #undef _LWORDSIZE
 
-#define cfs_smp_processor_id()  smp_processor_id()
-
-#ifndef get_cpu
-# ifdef CONFIG_PREEMPT
-#  define cfs_get_cpu()  ({ preempt_disable(); smp_processor_id(); })
-#  define cfs_put_cpu()  preempt_enable()
-# else
-#  define cfs_get_cpu()  smp_processor_id()
-#  define cfs_put_cpu()
-# endif
-#else
-# define cfs_get_cpu()   get_cpu()
-# define cfs_put_cpu()   put_cpu()
-#endif /* get_cpu & put_cpu */
-
 #ifdef HAVE_SYSCTL_CTLNAME
 #define INIT_CTL_NAME(a) .ctl_name = a,
 #define INIT_STRATEGY(a) .strategy = a,
index 0bae9c1..8c6b27a 100644 (file)
@@ -89,10 +89,6 @@ struct spin_lock { int foo; };
 typedef struct spin_lock spinlock_t;
 
 #define DEFINE_SPINLOCK(lock)          spinlock_t lock = { }
-#define LASSERT_SPIN_LOCKED(lock)      do { (void)sizeof(lock); } while (0)
-#define LINVRNT_SPIN_LOCKED(lock)      do { (void)sizeof(lock); } while (0)
-#define LASSERT_SEM_LOCKED(sem)                do { (void)sizeof(sem); } while (0)
-#define LASSERT_MUTEX_LOCKED(x)                do { (void)sizeof(x); } while (0)
 #define __SPIN_LOCK_UNLOCKED(x)                ((spinlock_t) {})
 
 void spin_lock_init(spinlock_t *lock);
index c4e806c..2734aa6 100644 (file)
@@ -59,24 +59,13 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
  * Just present a single processor until will add thread support.
  */
 #ifndef smp_processor_id
-# define cfs_smp_processor_id() 0
-#else
-# define cfs_smp_processor_id() smp_processor_id()
+# define smp_processor_id() 0
 #endif
 #ifndef num_online_cpus
-# define cfs_num_online_cpus() 1
-#else
-# define cfs_num_online_cpus() num_online_cpus()
+# define num_online_cpus() 1
 #endif
 #ifndef num_possible_cpus
-# define cfs_num_possible_cpus() 1
-#else
-# define cfs_num_possible_cpus() num_possible_cpus()
-#endif
-#ifndef num_present_cpus
-# define cfs_num_present_cpus() 1
-#else
-# define cfs_num_present_cpus() num_present_cpus()
+# define num_possible_cpus() 1
 #endif
 /*
  * Wait Queue.
index 2b71184..0ef2911 100644 (file)
 
 #define CFS_SYSFS_MODULE_PARM    0 /* no sysfs access to module parameters */
 
-#define LASSERT_SPIN_LOCKED(lock) do {} while(0)
-#define LASSERT_SEM_LOCKED(sem) LASSERT(down_trylock(sem) != 0)
 
 /* winnt panic */
 void libcfs_panic(char *msg);
-#define LIBCFS_PANIC(msg) libcfs_panic(msg)
+#define panic(msg) libcfs_panic(msg)
 void libcfs_register_panic_notifier();
 void libcfs_unregister_panic_notifier();
 
@@ -87,11 +85,9 @@ void libcfs_unregister_panic_notifier();
 #define cfs_assert     _ASSERT
 
 #ifndef get_cpu
-#define cfs_get_cpu() cfs_smp_processor_id()
-#define cfs_put_cpu() do { } while (0)
+#define get_cpu() smp_processor_id()
+#define put_cpu() do { } while (0)
 #else
-#define cfs_get_cpu() get_cpu()
-#define cfs_put_cpu() put_cpu()
 #endif
 
 #endif /* End of !__KERNEL__ */
index 1f98fd5..d7d388a 100644 (file)
@@ -838,10 +838,9 @@ libcfs_arch_cleanup(void);
 #define SMP_CACHE_BYTES             128
 #define CFS_NR_CPUS                 (32)
 #define smp_num_cpus                ((CCHAR)KeNumberProcessors)
-#define cfs_num_possible_cpus()     smp_num_cpus
-#define cfs_num_present_cpus()      smp_num_cpus
-#define cfs_num_online_cpus()       smp_num_cpus
-#define cfs_smp_processor_id()     ((USHORT)KeGetCurrentProcessorNumber())
+#define num_possible_cpus()     smp_num_cpus
+#define num_online_cpus()       smp_num_cpus
+#define smp_processor_id()         ((USHORT)KeGetCurrentProcessorNumber())
 #define smp_call_function(f, a, n, w)          do {} while(0)
 #define smp_rmb()                   do {} while(0)
 
index f8832a0..063cb80 100644 (file)
@@ -171,7 +171,7 @@ set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
        do_gettimeofday(&tv);
        header->ph_subsys = subsys;
        header->ph_mask = mask;
-       header->ph_cpu_id = cfs_smp_processor_id();
+       header->ph_cpu_id = smp_processor_id();
        header->ph_type = 0;
        header->ph_sec = (__u32)tv.tv_sec;
        header->ph_usec = tv.tv_usec;
index cfa027e..bc5454c 100644 (file)
@@ -408,15 +408,15 @@ int libcfs_debug_init(unsigned long bufsize)
                         libcfs_debug_file_path, PATH_MAX-1);
         }
 
-        /* If libcfs_debug_mb is set to an invalid value or uninitialized
-         * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
-        if (max > cfs_trace_max_debug_mb() || max < cfs_num_possible_cpus()) {
-                max = TCD_MAX_PAGES;
-        } else {
-                max = (max / cfs_num_possible_cpus());
+       /* If libcfs_debug_mb is set to an invalid value or uninitialized
+        * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
+       if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) {
+               max = TCD_MAX_PAGES;
+       } else {
+               max = (max / num_possible_cpus());
                max = (max << (20 - PAGE_CACHE_SHIFT));
-        }
-        rc = cfs_tracefile_init(max);
+       }
+       rc = cfs_tracefile_init(max);
 
         if (rc == 0)
                 libcfs_register_panic_notifier();
index a0168dc..0266fc9 100644 (file)
@@ -64,7 +64,7 @@ int cfs_tracefile_init_arch()
        for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
                cfs_trace_data[i] =
                        kmalloc(sizeof(union cfs_trace_data_union) *
-                               cfs_num_possible_cpus(), GFP_KERNEL);
+                               num_possible_cpus(), GFP_KERNEL);
                if (cfs_trace_data[i] == NULL)
                        goto out;
 
@@ -78,7 +78,7 @@ int cfs_tracefile_init_arch()
                tcd->tcd_cpu = j;
        }
 
-       for (i = 0; i < cfs_num_possible_cpus(); i++)
+       for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        cfs_trace_console_buffers[i][j] =
                                kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
@@ -101,7 +101,7 @@ void cfs_tracefile_fini_arch()
        int    i;
        int    j;
 
-       for (i = 0; i < cfs_num_possible_cpus(); i++)
+       for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++)
                        if (cfs_trace_console_buffers[i][j] != NULL) {
                                kfree(cfs_trace_console_buffers[i][j]);
@@ -200,7 +200,7 @@ cfs_set_ptldebug_header(struct ptldebug_header *header,
 
        header->ph_subsys = msgdata->msg_subsys;
        header->ph_mask = msgdata->msg_mask;
-       header->ph_cpu_id = cfs_smp_processor_id();
+       header->ph_cpu_id = smp_processor_id();
        header->ph_type = cfs_trace_buf_idx_get();
        header->ph_sec = (__u32)tv.tv_sec;
        header->ph_usec = tv.tv_usec;
index 0666b4c..3f7df0d 100644 (file)
@@ -105,8 +105,8 @@ lwt_control (int enable, int clear)
                 cfs_schedule_timeout(10);
         }
 
-        for (i = 0; i < cfs_num_online_cpus(); i++) {
-                p = lwt_cpus[i].lwtc_current_page;
+       for (i = 0; i < num_online_cpus(); i++) {
+               p = lwt_cpus[i].lwtc_current_page;
 
                 if (p == NULL)
                         return (-ENODATA);
@@ -141,19 +141,18 @@ lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *total_size,
        int          i;
        int          j;
 
-        if (!cfs_capable(CFS_CAP_SYS_ADMIN))
-                return (-EPERM);
+       if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+               return (-EPERM);
 
-        *ncpu = cfs_num_online_cpus();
-        *total_size = cfs_num_online_cpus() * lwt_pages_per_cpu *
-                bytes_per_page;
-        *now = get_cycles();
+       *ncpu = num_online_cpus();
+       *total_size = num_online_cpus() * lwt_pages_per_cpu * bytes_per_page;
+       *now = get_cycles();
 
-        if (user_ptr == NULL)
-                return (0);
+       if (user_ptr == NULL)
+               return (0);
 
-        for (i = 0; i < cfs_num_online_cpus(); i++) {
-                p = lwt_cpus[i].lwtc_current_page;
+       for (i = 0; i < num_online_cpus(); i++) {
+               p = lwt_cpus[i].lwtc_current_page;
 
                 if (p == NULL)
                        return -ENODATA;
@@ -172,24 +171,22 @@ lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *total_size,
         return (0);
 }
 
-int
-lwt_init ()
+int lwt_init ()
 {
        int     i;
-        int     j;
+       int     j;
 
-        for (i = 0; i < cfs_num_online_cpus(); i++)
-                if (lwt_cpus[i].lwtc_current_page != NULL)
-                        return (-EALREADY);
+       for (i = 0; i < num_online_cpus(); i++)
+               if (lwt_cpus[i].lwtc_current_page != NULL)
+                       return (-EALREADY);
 
-        LASSERT (!lwt_enabled);
+       LASSERT (!lwt_enabled);
 
        /* NULL pointers, zero scalars */
        memset (lwt_cpus, 0, sizeof (lwt_cpus));
-       lwt_pages_per_cpu =
-               LWT_MEMORY / (cfs_num_online_cpus() * PAGE_CACHE_SIZE);
+       lwt_pages_per_cpu = LWT_MEMORY / (num_online_cpus() * PAGE_CACHE_SIZE);
 
-       for (i = 0; i < cfs_num_online_cpus(); i++)
+       for (i = 0; i < num_online_cpus(); i++)
                for (j = 0; j < lwt_pages_per_cpu; j++) {
                        struct page *page = alloc_page(GFP_KERNEL);
                        lwt_page_t  *lwtp;
@@ -229,16 +226,15 @@ lwt_init ()
         return (0);
 }
 
-void
-lwt_fini ()
+void lwt_fini ()
 {
-        int    i;
+       int    i;
 
-        lwt_control(0, 0);
+       lwt_control(0, 0);
 
-        for (i = 0; i < cfs_num_online_cpus(); i++)
-                while (lwt_cpus[i].lwtc_current_page != NULL) {
-                        lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
+       for (i = 0; i < num_online_cpus(); i++)
+               while (lwt_cpus[i].lwtc_current_page != NULL) {
+                       lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
 
                         if (cfs_list_empty (&lwtp->lwtp_list)) {
                                 lwt_cpus[i].lwtc_current_page = NULL;
index a24e3e8..68b606e 100644 (file)
@@ -54,7 +54,7 @@ struct utsname *tmp_utsname;
 static char source_nid[sizeof(tmp_utsname->nodename)];
 
 static int source_pid;
-int cfs_smp_processor_id = 1;
+int smp_processor_id = 1;
 char libcfs_debug_file_path[1024];
 FILE *debug_file_fd;
 
index 3a96dae..03f908f 100644 (file)
@@ -165,11 +165,11 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
                        }
                }
 
-                tage->used = 0;
-                tage->cpu = cfs_smp_processor_id();
-                tage->type = tcd->tcd_type;
-                cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
-                tcd->tcd_cur_pages++;
+               tage->used = 0;
+               tage->cpu = smp_processor_id();
+               tage->type = tcd->tcd_type;
+               cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
+               tcd->tcd_cur_pages++;
 
                 if (tcd->tcd_cur_pages > 8 && thread_running) {
                         struct tracefiled_ctl *tctl = &trace_tctl;
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(libcfs_debug_vmsg2);
 
 void
 cfs_trace_assertion_failed(const char *str,
-                           struct libcfs_debug_msg_data *msgdata)
+                          struct libcfs_debug_msg_data *msgdata)
 {
-        struct ptldebug_header hdr;
+       struct ptldebug_header hdr;
 
-        libcfs_panic_in_progress = 1;
-        libcfs_catastrophe = 1;
-        cfs_mb();
+       libcfs_panic_in_progress = 1;
+       libcfs_catastrophe = 1;
+       cfs_mb();
 
-        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
+       cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
 
-        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
-                             msgdata->msg_file, msgdata->msg_fn);
+       cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
+                            msgdata->msg_file, msgdata->msg_fn);
 
-        LIBCFS_PANIC("Lustre debug assertion failure\n");
+       panic("Lustre debug assertion failure\n");
 
-        /* not reached */
+       /* not reached */
 }
 
 static void
@@ -916,37 +916,37 @@ int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
 
 int cfs_trace_set_debug_mb(int mb)
 {
-        int i;
-        int j;
-        int pages;
-        int limit = cfs_trace_max_debug_mb();
-        struct cfs_trace_cpu_data *tcd;
+       int i;
+       int j;
+       int pages;
+       int limit = cfs_trace_max_debug_mb();
+       struct cfs_trace_cpu_data *tcd;
 
-        if (mb < cfs_num_possible_cpus()) {
-                printk(CFS_KERN_WARNING
-                       "Lustre: %d MB is too small for debug buffer size, "
-                       "setting it to %d MB.\n", mb, cfs_num_possible_cpus());
-                mb = cfs_num_possible_cpus();
-        }
+       if (mb < num_possible_cpus()) {
+               printk(CFS_KERN_WARNING
+                      "Lustre: %d MB is too small for debug buffer size, "
+                      "setting it to %d MB.\n", mb, num_possible_cpus());
+               mb = num_possible_cpus();
+       }
 
-        if (mb > limit) {
-                printk(CFS_KERN_WARNING
-                       "Lustre: %d MB is too large for debug buffer size, "
-                       "setting it to %d MB.\n", mb, limit);
-                mb = limit;
-        }
+       if (mb > limit) {
+               printk(CFS_KERN_WARNING
+                      "Lustre: %d MB is too large for debug buffer size, "
+                      "setting it to %d MB.\n", mb, limit);
+               mb = limit;
+       }
 
-        mb /= cfs_num_possible_cpus();
+       mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_CACHE_SHIFT);
 
-        cfs_tracefile_write_lock();
+       cfs_tracefile_write_lock();
 
-        cfs_tcd_for_each(tcd, i, j)
-                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
+       cfs_tcd_for_each(tcd, i, j)
+               tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
 
-        cfs_tracefile_write_unlock();
+       cfs_tracefile_write_unlock();
 
-        return 0;
+       return 0;
 }
 
 int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
@@ -1054,16 +1054,16 @@ static int tracefiled(void *arg)
                 if (!cfs_list_empty(&pc.pc_pages)) {
                         int i;
 
-                        printk(CFS_KERN_ALERT "Lustre: trace pages aren't "
-                               " empty\n");
-                        printk(CFS_KERN_ERR "total cpus(%d): ",
-                               cfs_num_possible_cpus());
-                        for (i = 0; i < cfs_num_possible_cpus(); i++)
-                                if (cpu_online(i))
-                                        printk(CFS_KERN_ERR "%d(on) ", i);
-                                else
-                                        printk(CFS_KERN_ERR "%d(off) ", i);
-                        printk(CFS_KERN_ERR "\n");
+                       printk(CFS_KERN_ALERT "Lustre: trace pages aren't "
+                              " empty\n");
+                       printk(CFS_KERN_ERR "total cpus(%d): ",
+                              num_possible_cpus());
+                       for (i = 0; i < num_possible_cpus(); i++)
+                               if (cpu_online(i))
+                                       printk(CFS_KERN_ERR "%d(on) ", i);
+                               else
+                                       printk(CFS_KERN_ERR "%d(off) ", i);
+                       printk(CFS_KERN_ERR "\n");
 
                         i = 0;
                         cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
index 196ab96..1bdf0a5 100644 (file)
@@ -189,11 +189,11 @@ union cfs_trace_data_union {
 #define TCD_MAX_TYPES      8
 extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
 
-#define cfs_tcd_for_each(tcd, i, j)                                       \
-    for (i = 0; cfs_trace_data[i] != NULL; i++)                           \
-        for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);               \
-             j < cfs_num_possible_cpus();                                 \
-             j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
+#define cfs_tcd_for_each(tcd, i, j)                                      \
+    for (i = 0; cfs_trace_data[i] != NULL; i++)                                  \
+       for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);               \
+            j < num_possible_cpus();                                     \
+            j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
 
 #define cfs_tcd_for_each_type_lock(tcd, i, cpu)                           \
     for (i = 0; cfs_trace_data[i] &&                                      \
@@ -279,38 +279,34 @@ extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
 extern char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
 extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
 
-static inline char *
-cfs_trace_get_console_buffer(void)
+static inline char *cfs_trace_get_console_buffer(void)
 {
-        unsigned int i = cfs_get_cpu();
-        unsigned int j = cfs_trace_buf_idx_get();
+       unsigned int i = get_cpu();
+       unsigned int j = cfs_trace_buf_idx_get();
 
-        return cfs_trace_console_buffers[i][j];
+       return cfs_trace_console_buffers[i][j];
 }
 
 static inline void
 cfs_trace_put_console_buffer(char *buffer)
 {
-        cfs_put_cpu();
+       put_cpu();
 }
 
-static inline struct cfs_trace_cpu_data *
-cfs_trace_get_tcd(void)
+static inline struct cfs_trace_cpu_data *cfs_trace_get_tcd(void)
 {
        struct cfs_trace_cpu_data *tcd =
-                &(*cfs_trace_data[cfs_trace_buf_idx_get()])[cfs_get_cpu()].tcd;
+               &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
 
        cfs_trace_lock_tcd(tcd, 0);
 
        return tcd;
 }
 
-static inline void
-cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
+static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
 {
        cfs_trace_unlock_tcd(tcd, 0);
-
-       cfs_put_cpu();
+       put_cpu();
 }
 
 int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
index 04f9b66..6edddcc 100644 (file)
@@ -5886,7 +5886,7 @@ ks_init_tdi_data()
     }
 
     /* initialize engine threads list */
-    ks_data.ksnd_engine_nums = cfs_num_online_cpus();
+    ks_data.ksnd_engine_nums = num_online_cpus();
     if (ks_data.ksnd_engine_nums < 4) {
         ks_data.ksnd_engine_nums = 4;
     }
index 9e34cd3..c6fd8a0 100644 (file)
@@ -75,7 +75,7 @@ int cfs_tracefile_init_arch()
                tcd->tcd_cpu = (USHORT)j;
        }
 
-       for (i = 0; i < cfs_num_possible_cpus(); i++)
+       for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
                        cfs_trace_console_buffers[i][j] =
                                kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
@@ -99,7 +99,7 @@ void cfs_tracefile_fini_arch()
        int    i;
        int    j;
 
-       for (i = 0; i < cfs_num_possible_cpus(); i++) {
+       for (i = 0; i < num_possible_cpus(); i++) {
                for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
                        if (cfs_trace_console_buffers[i][j] != NULL) {
                                kfree(cfs_trace_console_buffers[i][j]);
@@ -175,7 +175,7 @@ cfs_set_ptldebug_header(struct ptldebug_header *header, int subsys, int mask,
 
        header->ph_subsys = subsys;
        header->ph_mask = mask;
-       header->ph_cpu_id = cfs_smp_processor_id();
+       header->ph_cpu_id = smp_processor_id();
        header->ph_type = cfs_trace_buf_idx_get();
        header->ph_sec = (__u32)tv.tv_sec;
        header->ph_usec = tv.tv_usec;
index e6fd99b..76844cd 100644 (file)
@@ -79,7 +79,7 @@
 //#define PJK_DEBUGGING
 
 #ifdef CONFIG_SMP
-# define PTLLND_N_SCHED         cfs_num_online_cpus()   /* # schedulers */
+# define PTLLND_N_SCHED         num_online_cpus()   /* # schedulers */
 #else
 # define PTLLND_N_SCHED         1                   /* # schedulers */
 #endif
index 4df26ee..dcee1a8 100644 (file)
@@ -525,7 +525,7 @@ kqswnal_startup (lnet_ni_t *ni)
 
        /**********************************************************************/
        /* Spawn scheduling threads */
-       for (i = 0; i < cfs_num_online_cpus(); i++) {
+       for (i = 0; i < num_online_cpus(); i++) {
                rc = kqswnal_thread_start(kqswnal_scheduler, NULL,
                                          "kqswnal_sched");
                if (rc != 0)
index 1bd51c2..d945c20 100644 (file)
@@ -51,7 +51,7 @@ int ksocknal_nsched(void)
 static inline int
 ksocknal_nsched(void)
 {
-        return cfs_num_online_cpus();
+       return num_online_cpus();
 }
 
 static inline int
index c9a3546..32a5051 100644 (file)
@@ -173,7 +173,7 @@ int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
 
        info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
 
-       LASSERT_MUTEX_LOCKED(&fld->lsf_lock);
+       LASSERT(mutex_is_locked(&fld->lsf_lock));
 
        range = &info->fti_lrange;
        memset(range, 0, sizeof(*range));
index ff60773..cb159d2 100644 (file)
@@ -417,12 +417,12 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
                                spin_lock(&stats->ls_lock);
                        return 0;
                } else {
-                       unsigned int cpuid = cfs_get_cpu();
+                       unsigned int cpuid = get_cpu();
 
                        if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
                                rc = lprocfs_stats_alloc_one(stats, cpuid);
                                if (rc < 0) {
-                                       cfs_put_cpu();
+                                       put_cpu();
                                        return rc;
                                }
                        }
@@ -458,7 +458,7 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
                                spin_unlock(&stats->ls_lock);
                        }
                } else {
-                       cfs_put_cpu();
+                       put_cpu();
                }
                return;
 
index c210f94..9d376c9 100644 (file)
@@ -72,7 +72,7 @@ struct obd_device;
 
 #define OBD_LDLM_DEVICENAME  "ldlm"
 
-#define LDLM_DEFAULT_LRU_SIZE (100 * cfs_num_online_cpus())
+#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
 #define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
 #define LDLM_CTIME_AGE_LIMIT (10)
 #define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
@@ -1491,7 +1491,7 @@ static inline void unlock_res(struct ldlm_resource *res)
 /** Check if resource is already locked, assert if not. */
 static inline void check_res_locked(struct ldlm_resource *res)
 {
-        LASSERT_SPIN_LOCKED(&res->lr_lock);
+       LASSERT(spin_is_locked(&res->lr_lock));
 }
 
 struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
index e6a8c4c..339cd7c 100644 (file)
 #define LDLM_THR_FACTOR                8
 #define LDLM_NTHRS_INIT                PTLRPC_NTHRS_INIT
 #define LDLM_NTHRS_BASE                24
-#define LDLM_NTHRS_MAX         (cfs_num_online_cpus() == 1 ? 64 : 128)
+#define LDLM_NTHRS_MAX         (num_online_cpus() == 1 ? 64 : 128)
 
 #define LDLM_BL_THREADS   LDLM_NTHRS_AUTO_INIT
 #define LDLM_CLIENT_NBUFS 1
index b86c66d..8343068 100644 (file)
@@ -1033,7 +1033,7 @@ void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
                                       ldlm_side_t client)
 {
        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
-       LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
+       LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
        cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
 }
 
@@ -1042,7 +1042,7 @@ void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
                                         ldlm_side_t client)
 {
        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
-       LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
+       LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
        cfs_list_move_tail(&ns->ns_list_chain,
                           ldlm_namespace_inactive_list(client));
 }
@@ -1050,10 +1050,10 @@ void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
 /** Should be called with ldlm_namespace_lock(client) taken. */
 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
 {
-        LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
-        LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
-        return container_of(ldlm_namespace_list(client)->next,
-                struct ldlm_namespace, ns_list_chain);
+       LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
+       LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
+       return container_of(ldlm_namespace_list(client)->next,
+                           struct ldlm_namespace, ns_list_chain);
 }
 
 /** Create and initialize new resource. */
index 66ca76d..719c8ac 100644 (file)
@@ -289,8 +289,8 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
                  * In fact, page cannot be locked here at all, because
                 * ll_dir_filler() does synchronous io.
                  */
-                wait_on_page(page);
-                if (PageUptodate(page)) {
+               wait_on_page_locked(page);
+               if (PageUptodate(page)) {
                        dp = kmap(page);
                         if (BITS_PER_LONG == 32 && hash64) {
                                 *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
@@ -423,13 +423,13 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
                 GOTO(out_unlock, page);
         }
 
-        wait_on_page(page);
-        (void)kmap(page);
-        if (!PageUptodate(page)) {
-                CERROR("page not updated: "DFID" at "LPU64": rc %d\n",
-                       PFID(ll_inode2fid(dir)), hash, -5);
-                goto fail;
-        }
+       wait_on_page_locked(page);
+       (void)kmap(page);
+       if (!PageUptodate(page)) {
+               CERROR("page not updated: "DFID" at "LPU64": rc %d\n",
+                      PFID(ll_inode2fid(dir)), hash, -5);
+               goto fail;
+       }
         if (!PageChecked(page))
                 ll_check_page(dir, page);
         if (PageError(page)) {
index 64055b4..4322c35 100644 (file)
@@ -1471,8 +1471,8 @@ static inline void cl_isize_unlock(struct inode *inode)
 
 static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
 {
-        LASSERT_SEM_LOCKED(&ll_i2info(inode)->lli_size_sem);
-        i_size_write(inode, kms);
+       LASSERT(down_trylock(&ll_i2info(inode)->lli_size_sem) != 0);
+       i_size_write(inode, kms);
 }
 
 static inline void cl_isize_write(struct inode *inode, loff_t kms)
index e52a349..8bb7eab 100644 (file)
@@ -64,7 +64,7 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
        int i;
        int rc = 0;
 
-       LASSERT_SPIN_LOCKED(&lsm->lsm_lock);
+       LASSERT(spin_is_locked(&lsm->lsm_lock));
 #ifdef __KERNEL__
        LASSERT(lsm->lsm_lock_owner == cfs_curproc_pid());
 #endif
@@ -155,9 +155,9 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
         __u64 kms;
         ENTRY;
 
-        LASSERT_SPIN_LOCKED(&lsm->lsm_lock);
+       LASSERT(spin_is_locked(&lsm->lsm_lock));
 #ifdef __KERNEL__
-        LASSERT(lsm->lsm_lock_owner == cfs_curproc_pid());
+       LASSERT(lsm->lsm_lock_owner == cfs_curproc_pid());
 #endif
 
         if (shrink) {
index 21a5306..b6e8ff5 100644 (file)
@@ -504,11 +504,11 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
 
         ENTRY;
 
-        head = cl_object_header(obj);
-        LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
+       head = cl_object_header(obj);
+       LINVRNT(spin_is_locked(&head->coh_lock_guard));
        CS_LOCK_INC(obj, lookup);
-        cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
-                int matched;
+       cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+               int matched;
 
                 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
                           lock->cll_state < CLS_FREEING &&
index fd036aa..6073df5 100644 (file)
@@ -218,11 +218,11 @@ EXPORT_SYMBOL(cl_object_attr_unlock);
 int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
 {
-        struct lu_object_header *top;
-        int result;
+       struct lu_object_header *top;
+       int result;
 
-        LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
-        ENTRY;
+       LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+       ENTRY;
 
         top = obj->co_lu.lo_header;
         result = 0;
@@ -250,11 +250,11 @@ EXPORT_SYMBOL(cl_object_attr_get);
 int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
                        const struct cl_attr *attr, unsigned v)
 {
-        struct lu_object_header *top;
-        int result;
+       struct lu_object_header *top;
+       int result;
 
-        LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
-        ENTRY;
+       LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+       ENTRY;
 
         top = obj->co_lu.lo_header;
         result = 0;
index b7dfab1..acdb2e9 100644 (file)
@@ -155,14 +155,14 @@ cl_page_at_trusted(const struct cl_page *page,
  */
 struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
 {
-        struct cl_page *page;
+       struct cl_page *page;
 
-        LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
+       LASSERT(spin_is_locked(&hdr->coh_page_guard));
 
-        page = radix_tree_lookup(&hdr->coh_tree, index);
-        if (page != NULL)
-                cl_page_get_trust(page);
-        return page;
+       page = radix_tree_lookup(&hdr->coh_tree, index);
+       if (page != NULL)
+               cl_page_get_trust(page);
+       return page;
 }
 EXPORT_SYMBOL(cl_page_lookup);
 
index ec9e3b3..a30154a 100644 (file)
@@ -1287,7 +1287,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
        if (flags & LPROCFS_STATS_FLAG_NOPERCPU)
                num_entry = 1;
        else
-               num_entry = cfs_num_possible_cpus();
+               num_entry = num_possible_cpus();
 
        /* alloc percpu pointers for all possible cpu slots */
        LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
@@ -1340,7 +1340,7 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
        if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
                num_entry = 1;
        else
-               num_entry = cfs_num_possible_cpus();
+               num_entry = num_possible_cpus();
 
        percpusize = lprocfs_stats_counter_size(stats);
        for (i = 0; i < num_entry; i++)
index cee17d2..f25ce93 100644 (file)
@@ -48,7 +48,7 @@ static inline void ofd_fmd_put_nolock(struct obd_export *exp,
 {
        struct filter_export_data *fed = &exp->exp_filter_data;
 
-       LASSERT_SPIN_LOCKED(&fed->fed_lock);
+       LASSERT(spin_is_locked(&fed->fed_lock));
        if (--fmd->fmd_refcount == 0) {
                /* XXX when we have persistent reservations and the handle
                 * is stored herein we need to drop it here. */
@@ -115,7 +115,7 @@ static struct ofd_mod_data *ofd_fmd_find_nolock(struct obd_export *exp,
 
        cfs_time_t now = cfs_time_current();
 
-       LASSERT_SPIN_LOCKED(&fed->fed_lock);
+       LASSERT(spin_is_locked(&fed->fed_lock));
 
        cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
                if (lu_fid_eq(&fmd->fmd_fid, fid)) {
index a27a73e..0d3ec43 100644 (file)
@@ -233,7 +233,7 @@ static obd_size ofd_grant_space_left(struct obd_export *exp)
        obd_size                 unstable;
 
        ENTRY;
-       LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
+       LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
 
        spin_lock(&ofd->ofd_osfs_lock);
        /* get available space from cached statfs data */
@@ -297,7 +297,7 @@ static void ofd_grant_incoming(const struct lu_env *env, struct obd_export *exp,
        long                             dirty, dropped, grant_chunk;
        ENTRY;
 
-       LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
+       LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
 
        if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
                                        (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
@@ -375,7 +375,7 @@ static void ofd_grant_shrink(struct obd_export *exp,
        struct obd_device               *obd = exp->exp_obd;
        long                             grant_shrink;
 
-       LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
+       LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
        LASSERT(exp);
        if (left_space >= ofd->ofd_tot_granted_clients *
                          OFD_GRANT_SHRINK_LIMIT(exp))
@@ -456,7 +456,7 @@ static void ofd_grant_check(const struct lu_env *env, struct obd_export *exp,
 
        ENTRY;
 
-       LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
+       LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
 
        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            (oa->o_flags & OBD_FL_RECOV_RESEND)) {
index 7f41854..9f0c403 100644 (file)
@@ -64,7 +64,7 @@ static int ofd_lvbo_init(struct ldlm_resource *res)
        ENTRY;
 
        LASSERT(res);
-       LASSERT_MUTEX_LOCKED(&res->lr_lvb_mutex);
+       LASSERT(mutex_is_locked(&res->lr_lvb_mutex));
 
        if (res->lr_lvb_data != NULL)
                RETURN(0);
index 8536171..4e7c535 100644 (file)
@@ -1324,7 +1324,7 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
 static void osc_consume_write_grant(struct client_obd *cli,
                                    struct brw_page *pga)
 {
-       LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
+       LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
        cfs_atomic_inc(&obd_dirty_pages);
        cli->cl_dirty += PAGE_CACHE_SIZE;
@@ -1341,7 +1341,7 @@ static void osc_release_write_grant(struct client_obd *cli,
 {
        ENTRY;
 
-       LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
+       LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
        if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
                EXIT;
                return;
index b6e16a7..7051454 100644 (file)
@@ -173,7 +173,7 @@ iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
        struct buffer_head *bh;
        int err;
 
-       LASSERT_SEM_LOCKED(&c->ic_idle_sem);
+       LASSERT(down_trylock(&c->ic_idle_sem) != 0);
 
        if (blk == 0)
                return NULL;
index 8b355e7..0ece35a 100644 (file)
@@ -2255,8 +2255,8 @@ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
  */
 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
 {
-        LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
-        (void)__ptlrpc_req_finished(request, 1);
+       LASSERT(spin_is_locked(&request->rq_import->imp_lock));
+       (void)__ptlrpc_req_finished(request, 1);
 }
 EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
 
@@ -2404,9 +2404,8 @@ void ptlrpc_free_committed(struct obd_import *imp)
         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
         ENTRY;
 
-        LASSERT(imp != NULL);
-
-        LASSERT_SPIN_LOCKED(&imp->imp_lock);
+       LASSERT(imp != NULL);
+       LASSERT(spin_is_locked(&imp->imp_lock));
 
 
         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
@@ -2535,9 +2534,9 @@ EXPORT_SYMBOL(ptlrpc_request_addref);
 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                       struct obd_import *imp)
 {
-        cfs_list_t *tmp;
+       cfs_list_t *tmp;
 
-        LASSERT_SPIN_LOCKED(&imp->imp_lock);
+       LASSERT(spin_is_locked(&imp->imp_lock));
 
         if (req->rq_transno == 0) {
                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
index 4e704be..3083f17 100644 (file)
@@ -141,19 +141,19 @@ void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
 static
 void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
 {
-        LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+       LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
+       LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
-        LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+       LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
 
        clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
 
-        if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
-                __cfs_hlist_del(&ctx->cc_cache);
-                cfs_hlist_add_head(&ctx->cc_cache, freelist);
-        } else {
-                cfs_hlist_del_init(&ctx->cc_cache);
-        }
+       if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
+               __cfs_hlist_del(&ctx->cc_cache);
+               cfs_hlist_add_head(&ctx->cc_cache, freelist);
+       } else {
+               cfs_hlist_del_init(&ctx->cc_cache);
+       }
 }
 
 /*
@@ -731,17 +731,17 @@ void gss_release_msg(struct gss_upcall_msg *gmsg)
 static
 void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
 {
-        __u32 idx = gmsg->gum_mechidx;
+       __u32 idx = gmsg->gum_mechidx;
 
-        LASSERT(idx < MECH_MAX);
-        LASSERT_SPIN_LOCKED(&upcall_locks[idx]);
+       LASSERT(idx < MECH_MAX);
+       LASSERT(spin_is_locked(&upcall_locks[idx]));
 
-        if (cfs_list_empty(&gmsg->gum_list))
-                return;
+       if (cfs_list_empty(&gmsg->gum_list))
+               return;
 
-        cfs_list_del_init(&gmsg->gum_list);
-        LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
-        cfs_atomic_dec(&gmsg->gum_refcount);
+       cfs_list_del_init(&gmsg->gum_list);
+       LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
+       cfs_atomic_dec(&gmsg->gum_refcount);
 }
 
 static
index 9c1d17b..bc65d15 100644 (file)
@@ -198,7 +198,7 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
 static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
 {
        ENTRY;
-       LASSERT_SPIN_LOCKED(&imp->imp_lock);
+       LASSERT(spin_is_locked(&imp->imp_lock));
 
        CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
        imp->imp_invalid = 1;
index 11de560..4396c4b 100644 (file)
@@ -386,7 +386,7 @@ EXPORT_SYMBOL(ptlrpc_pinger_sending_on_import);
 void ptlrpc_pinger_commit_expected(struct obd_import *imp)
 {
        ptlrpc_update_next_ping(imp, 1);
-       LASSERT_SPIN_LOCKED(&imp->imp_lock);
+       LASSERT(spin_is_locked(&imp->imp_lock));
        /*
         * Avoid reading stale imp_connect_data.  When not sure if pings are
         * expected or not on next connection, we assume they are not and force
@@ -470,13 +470,13 @@ static struct timeout_item*
 ptlrpc_pinger_register_timeout(int time, enum timeout_event event,
                                timeout_cb_t cb, void *data)
 {
-        struct timeout_item *item, *tmp;
+       struct timeout_item *item, *tmp;
 
-        LASSERT_MUTEX_LOCKED(&pinger_mutex);
+       LASSERT(mutex_is_locked(&pinger_mutex));
 
-        cfs_list_for_each_entry(item, &timeout_list, ti_chain)
-                if (item->ti_event == event)
-                        goto out;
+       cfs_list_for_each_entry(item, &timeout_list, ti_chain)
+               if (item->ti_event == event)
+                       goto out;
 
         item = ptlrpc_new_timeout(time, event, cb, data);
         if (item) {
index d79c3d3..6631a6d 100644 (file)
@@ -114,10 +114,10 @@ ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
                 return &ptlrpcds->pd_thread_rcv;
 
 #ifdef __KERNEL__
-        switch (policy) {
-        case PDL_POLICY_SAME:
-                idx = cfs_smp_processor_id() % ptlrpcds->pd_nthreads;
-                break;
+       switch (policy) {
+       case PDL_POLICY_SAME:
+               idx = smp_processor_id() % ptlrpcds->pd_nthreads;
+               break;
         case PDL_POLICY_LOCAL:
                 /* Before CPU partition patches available, process it the same
                  * as "PDL_POLICY_ROUND". */
@@ -128,7 +128,7 @@ ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
                  * CPU partition patches are available. */
                 index = -1;
         case PDL_POLICY_PREFERRED:
-                if (index >= 0 && index < cfs_num_online_cpus()) {
+               if (index >= 0 && index < num_online_cpus()) {
                         idx = index % ptlrpcds->pd_nthreads;
                         break;
                 }
@@ -138,7 +138,7 @@ ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
         case PDL_POLICY_ROUND:
                 /* We do not care whether it is strict load balance. */
                 idx = ptlrpcds->pd_index + 1;
-                if (idx == cfs_smp_processor_id())
+               if (idx == smp_processor_id())
                         idx++;
                 idx %= ptlrpcds->pd_nthreads;
                 ptlrpcds->pd_index = idx;
@@ -415,9 +415,9 @@ static int ptlrpcd(void *arg)
        if (test_bit(LIOD_BIND, &pc->pc_flags)) {
                int index = pc->pc_index;
 
-                if (index >= 0 && index < cfs_num_possible_cpus()) {
+               if (index >= 0 && index < num_possible_cpus()) {
                        while (!cpu_online(index)) {
-                               if (++index >= cfs_num_possible_cpus())
+                               if (++index >= num_possible_cpus())
                                        index = 0;
                        }
                        set_cpus_allowed_ptr(cfs_current(),
@@ -547,7 +547,7 @@ static int ptlrpcd_bind(int index, int max)
        {
                int i;
                mask = *cpumask_of_node(cpu_to_node(index));
-               for (i = max; i < cfs_num_online_cpus(); i++)
+               for (i = max; i < num_online_cpus(); i++)
                        cpu_clear(i, mask);
                pc->pc_npartners = cpus_weight(mask) - 1;
                set_bit(LIOD_BIND, &pc->pc_flags);
@@ -828,10 +828,10 @@ static void ptlrpcd_fini(void)
 
 static int ptlrpcd_init(void)
 {
-        int nthreads = cfs_num_online_cpus();
-        char name[16];
-        int size, i = -1, j, rc = 0;
-        ENTRY;
+       int nthreads = num_online_cpus();
+       char name[16];
+       int size, i = -1, j, rc = 0;
+       ENTRY;
 
 #ifdef __KERNEL__
         if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
index ea7a3dd..1e7f246 100644 (file)
@@ -449,13 +449,13 @@ out:
 
 static inline void enc_pools_wakeup(void)
 {
-        LASSERT_SPIN_LOCKED(&page_pools.epp_lock);
-        LASSERT(page_pools.epp_waitqlen >= 0);
+       LASSERT(spin_is_locked(&page_pools.epp_lock));
+       LASSERT(page_pools.epp_waitqlen >= 0);
 
-        if (unlikely(page_pools.epp_waitqlen)) {
-                LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
-                cfs_waitq_broadcast(&page_pools.epp_waitq);
-        }
+       if (unlikely(page_pools.epp_waitqlen)) {
+               LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
+               cfs_waitq_broadcast(&page_pools.epp_waitq);
+       }
 }
 
 static int enc_pools_should_grow(int page_needed, long now)
index 05e6ae2..73b2952 100644 (file)
@@ -404,20 +404,20 @@ ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
 {
        ENTRY;
 
-       LASSERT_SPIN_LOCKED(&rs->rs_svcpt->scp_rep_lock);
-        LASSERT_SPIN_LOCKED(&rs->rs_lock);
-        LASSERT (rs->rs_difficult);
-        rs->rs_scheduled_ever = 1;  /* flag any notification attempt */
+       LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
+       LASSERT(spin_is_locked(&rs->rs_lock));
+       LASSERT (rs->rs_difficult);
+       rs->rs_scheduled_ever = 1;  /* flag any notification attempt */
 
-        if (rs->rs_scheduled) {     /* being set up or already notified */
-                EXIT;
-                return;
-        }
+       if (rs->rs_scheduled) {     /* being set up or already notified */
+               EXIT;
+               return;
+       }
 
-        rs->rs_scheduled = 1;
-        cfs_list_del_init(&rs->rs_list);
-        ptlrpc_dispatch_difficult_reply(rs);
-        EXIT;
+       rs->rs_scheduled = 1;
+       cfs_list_del_init(&rs->rs_list);
+       ptlrpc_dispatch_difficult_reply(rs);
+       EXIT;
 }
 EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);