s/cfs_mem_cache_alloc/kmem_cache_alloc/g
s/cfs_mem_cache_free/kmem_cache_free/g
s/cfs_mem_is_in_cache/kmem_is_in_cache/g
+
+################################################################################
+# macros in kp30.h
+
+s/\bcfs_num_online_cpus\b/num_online_cpus/g
+/#[ \t]*define[ \t]*\bnum_online_cpus\b *( *)[ \t]*\bnum_online_cpus\b *( *)/d
+s/\bwait_on_page\b/wait_on_page_locked/g
+/#[ \t]*define[ \t]*\bwait_on_page_locked\b[ \t]*\bwait_on_page_locked\b/d
+s/^\([ \t]*\)LASSERT_SPIN_LOCKED\b *\((.*)\)/\1LASSERT(spin_is_locked\2)/g
+/#[ \t]*define[ \t]*\bLASSERT_SPIN_LOCKED\b/d
+s/^\([ \t]*\)LINVRNT_SPIN_LOCKED\b *\((.*)\)/\1LINVRNT(spin_is_locked\2)/g
+/#[ \t]*define[ \t]*\bLINVRNT_SPIN_LOCKED\b/d
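+# Caveat: spin_is_locked() typically evaluates to 0 on uniprocessor kernels,
+# so the rewritten asserts are only safe under CONFIG_SMP; the old wrappers
+# compiled to no-ops on !CONFIG_SMP (see the removed kp30.h hunk below).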
+s/^\([ \t]*\)LASSERT_SEM_LOCKED\b *\((.*)\)/\1LASSERT(down_trylock\2 != 0)/g
+/#[ \t]*define[ \t]*\bLASSERT_SEM_LOCKED\b/d
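+# Note: down_trylock() returns non-zero when the semaphore is already held,
+# so the rewrite asserts that the lock is taken. For example,
+#     LASSERT_SEM_LOCKED(&c->ic_idle_sem);
+# becomes
+#     LASSERT(down_trylock(&c->ic_idle_sem) != 0);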
+s/^\([ \t]*\)LASSERT_MUTEX_LOCKED\b *\((.*)\)/\1LASSERT(mutex_is_locked\2)/g
+/#[ \t]*define[ \t]*\bLASSERT_MUTEX_LOCKED\b/d
+s/\bLIBCFS_PANIC\b/panic/g
+/#[ \t]*define[ \t]*\bpanic\b *( *\w* *)[ \t]*\bpanic\b *( *\w* *)/d
+s/\bcfs_num_possible_cpus\b/num_possible_cpus/g
+/#[ \t]*define[ \t]*\bnum_possible_cpus\b *( *)[ \t]*\bnum_possible_cpus\b *( *)/d
+s/\bcfs_smp_processor_id\b/smp_processor_id/g
+/#[ \t]*define[ \t]*\bsmp_processor_id\b *( *)[ \t]*\bsmp_processor_id\b *( *)/d
+s/\bcfs_get_cpu\b/get_cpu/g
+/#[ \t]*define[ \t]*\bget_cpu\b *( *)[ \t]*\bget_cpu\b *( *)/d
+s/\bcfs_put_cpu\b/put_cpu/g
+/#[ \t]*define[ \t]*\bput_cpu\b *( *)[ \t]*\bput_cpu\b *( *)/d
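+# Each rename above is paired with a /d rule that drops the now-degenerate
+# compatibility macro: e.g. "s/\bcfs_get_cpu\b/get_cpu/g" rewrites
+#     #define cfs_get_cpu() get_cpu()
+# into the self-referential
+#     #define get_cpu() get_cpu()
+# which the matching /d rule then deletes.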
#include <libcfs/darwin/darwin-prim.h>
#include <lnet/lnet.h>
-#ifdef CONFIG_SMP
-#define LASSERT_SPIN_LOCKED(lock) do {} while(0) /* XXX */
-#else
-#define LASSERT_SPIN_LOCKED(lock) do {} while(0)
-#endif
-#define LASSERT_SEM_LOCKED(sem) do {} while(0) /* XXX */
-
-#define LIBCFS_PANIC(msg) panic(msg)
#error libcfs_register_panic_notifier() missing
#error libcfs_unregister_panic_notifier() missing
/* --------------------------------------------------------------------- */
-#define PORTAL_SYMBOL_REGISTER(x) cfs_symbol_register(#x, &x)
-#define PORTAL_SYMBOL_UNREGISTER(x) cfs_symbol_unregister(#x)
-
#define PORTAL_SYMBOL_GET(x) ((typeof(&x))cfs_symbol_get(#x))
#define PORTAL_SYMBOL_PUT(x) cfs_symbol_put(#x)
#include <libcfs/linux/portals_compat25.h>
-#define cfs_get_work_data(type,field,data) container_of(data,type,field)
-
-#define cfs_num_online_cpus() num_online_cpus()
-#define wait_on_page wait_on_page_locked
-#define our_recalc_sigpending(current) recalc_sigpending()
-#define strtok(a,b) strpbrk(a, b)
-#define work_struct_t struct work_struct
-
-#ifdef CONFIG_SMP
-#define LASSERT_SPIN_LOCKED(lock) LASSERT(spin_is_locked(lock))
-#define LINVRNT_SPIN_LOCKED(lock) LINVRNT(spin_is_locked(lock))
-#else
-#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#endif
-
-#define LASSERT_SEM_LOCKED(sem) LASSERT(down_trylock(sem) != 0)
-#define LASSERT_MUTEX_LOCKED(x) LASSERT(mutex_is_locked(x))
-
-#define LIBCFS_PANIC(msg) panic(msg)
-
/* ------------------------------------------------------------------- */
-#define PORTAL_SYMBOL_REGISTER(x)
-#define PORTAL_SYMBOL_UNREGISTER(x)
-
#define PORTAL_SYMBOL_GET(x) symbol_get(x)
#define PORTAL_SYMBOL_PUT(x) symbol_put(x)
# define printf(format, b...) CDEBUG(D_OTHER, format , ## b)
# define time(a) CURRENT_TIME
-# define cfs_num_possible_cpus() num_possible_cpus()
-# define cfs_num_present_cpus() num_present_cpus()
-
/******************************************************************************/
/* Light-weight trace
* Support for temporary event tracing with minimal Heisenberg effect. */
#undef _LWORDSIZE
-#define cfs_smp_processor_id() smp_processor_id()
-
-#ifndef get_cpu
-# ifdef CONFIG_PREEMPT
-# define cfs_get_cpu() ({ preempt_disable(); smp_processor_id(); })
-# define cfs_put_cpu() preempt_enable()
-# else
-# define cfs_get_cpu() smp_processor_id()
-# define cfs_put_cpu()
-# endif
-#else
-# define cfs_get_cpu() get_cpu()
-# define cfs_put_cpu() put_cpu()
-#endif /* get_cpu & put_cpu */
-
#ifdef HAVE_SYSCTL_CTLNAME
#define INIT_CTL_NAME(a) .ctl_name = a,
#define INIT_STRATEGY(a) .strategy = a,
typedef struct spin_lock spinlock_t;
#define DEFINE_SPINLOCK(lock) spinlock_t lock = { }
-#define LASSERT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
-#define LINVRNT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
-#define LASSERT_SEM_LOCKED(sem) do { (void)sizeof(sem); } while (0)
-#define LASSERT_MUTEX_LOCKED(x) do { (void)sizeof(x); } while (0)
#define __SPIN_LOCK_UNLOCKED(x) ((spinlock_t) {})
void spin_lock_init(spinlock_t *lock);
* Just present a single processor until we add thread support.
*/
#ifndef smp_processor_id
-# define cfs_smp_processor_id() 0
-#else
-# define cfs_smp_processor_id() smp_processor_id()
+# define smp_processor_id() 0
#endif
#ifndef num_online_cpus
-# define cfs_num_online_cpus() 1
-#else
-# define cfs_num_online_cpus() num_online_cpus()
+# define num_online_cpus() 1
#endif
#ifndef num_possible_cpus
-# define cfs_num_possible_cpus() 1
-#else
-# define cfs_num_possible_cpus() num_possible_cpus()
-#endif
-#ifndef num_present_cpus
-# define cfs_num_present_cpus() 1
-#else
-# define cfs_num_present_cpus() num_present_cpus()
+# define num_possible_cpus() 1
#endif
/*
* Wait Queue.
#define CFS_SYSFS_MODULE_PARM 0 /* no sysfs access to module parameters */
-#define LASSERT_SPIN_LOCKED(lock) do {} while(0)
-#define LASSERT_SEM_LOCKED(sem) LASSERT(down_trylock(sem) != 0)
/* winnt panic */
void libcfs_panic(char *msg);
-#define LIBCFS_PANIC(msg) libcfs_panic(msg)
+#define panic(msg) libcfs_panic(msg)
void libcfs_register_panic_notifier();
void libcfs_unregister_panic_notifier();
#define cfs_assert _ASSERT
#ifndef get_cpu
-#define cfs_get_cpu() cfs_smp_processor_id()
-#define cfs_put_cpu() do { } while (0)
+#define get_cpu() smp_processor_id()
+#define put_cpu() do { } while (0)
-#else
-#define cfs_get_cpu() get_cpu()
-#define cfs_put_cpu() put_cpu()
#endif
#endif /* End of !__KERNEL__ */
#define SMP_CACHE_BYTES 128
#define CFS_NR_CPUS (32)
#define smp_num_cpus ((CCHAR)KeNumberProcessors)
-#define cfs_num_possible_cpus() smp_num_cpus
-#define cfs_num_present_cpus() smp_num_cpus
-#define cfs_num_online_cpus() smp_num_cpus
-#define cfs_smp_processor_id() ((USHORT)KeGetCurrentProcessorNumber())
+#define num_possible_cpus() smp_num_cpus
+#define num_online_cpus() smp_num_cpus
+#define smp_processor_id() ((USHORT)KeGetCurrentProcessorNumber())
#define smp_call_function(f, a, n, w) do {} while(0)
#define smp_rmb() do {} while(0)
do_gettimeofday(&tv);
header->ph_subsys = subsys;
header->ph_mask = mask;
- header->ph_cpu_id = cfs_smp_processor_id();
+ header->ph_cpu_id = smp_processor_id();
header->ph_type = 0;
header->ph_sec = (__u32)tv.tv_sec;
header->ph_usec = tv.tv_usec;
libcfs_debug_file_path, PATH_MAX-1);
}
- /* If libcfs_debug_mb is set to an invalid value or uninitialized
- * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
- if (max > cfs_trace_max_debug_mb() || max < cfs_num_possible_cpus()) {
- max = TCD_MAX_PAGES;
- } else {
- max = (max / cfs_num_possible_cpus());
+	/* If libcfs_debug_mb is set to an invalid value or uninitialized
+	 * then just make the total buffers num_possible_cpus() * TCD_MAX_PAGES */
+ if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) {
+ max = TCD_MAX_PAGES;
+ } else {
+ max = (max / num_possible_cpus());
max = (max << (20 - PAGE_CACHE_SHIFT));
- }
- rc = cfs_tracefile_init(max);
+ }
+ rc = cfs_tracefile_init(max);
if (rc == 0)
libcfs_register_panic_notifier();
for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
cfs_trace_data[i] =
kmalloc(sizeof(union cfs_trace_data_union) *
- cfs_num_possible_cpus(), GFP_KERNEL);
+ num_possible_cpus(), GFP_KERNEL);
if (cfs_trace_data[i] == NULL)
goto out;
tcd->tcd_cpu = j;
}
- for (i = 0; i < cfs_num_possible_cpus(); i++)
+ for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++) {
cfs_trace_console_buffers[i][j] =
kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
int i;
int j;
- for (i = 0; i < cfs_num_possible_cpus(); i++)
+ for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++)
if (cfs_trace_console_buffers[i][j] != NULL) {
kfree(cfs_trace_console_buffers[i][j]);
header->ph_subsys = msgdata->msg_subsys;
header->ph_mask = msgdata->msg_mask;
- header->ph_cpu_id = cfs_smp_processor_id();
+ header->ph_cpu_id = smp_processor_id();
header->ph_type = cfs_trace_buf_idx_get();
header->ph_sec = (__u32)tv.tv_sec;
header->ph_usec = tv.tv_usec;
cfs_schedule_timeout(10);
}
- for (i = 0; i < cfs_num_online_cpus(); i++) {
- p = lwt_cpus[i].lwtc_current_page;
+ for (i = 0; i < num_online_cpus(); i++) {
+ p = lwt_cpus[i].lwtc_current_page;
if (p == NULL)
return (-ENODATA);
int i;
int j;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- return (-EPERM);
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ return (-EPERM);
- *ncpu = cfs_num_online_cpus();
- *total_size = cfs_num_online_cpus() * lwt_pages_per_cpu *
- bytes_per_page;
- *now = get_cycles();
+ *ncpu = num_online_cpus();
+ *total_size = num_online_cpus() * lwt_pages_per_cpu * bytes_per_page;
+ *now = get_cycles();
- if (user_ptr == NULL)
- return (0);
+ if (user_ptr == NULL)
+ return (0);
- for (i = 0; i < cfs_num_online_cpus(); i++) {
- p = lwt_cpus[i].lwtc_current_page;
+ for (i = 0; i < num_online_cpus(); i++) {
+ p = lwt_cpus[i].lwtc_current_page;
if (p == NULL)
return -ENODATA;
return (0);
}
-int
-lwt_init ()
+int lwt_init(void)
{
int i;
- int j;
+ int j;
- for (i = 0; i < cfs_num_online_cpus(); i++)
- if (lwt_cpus[i].lwtc_current_page != NULL)
- return (-EALREADY);
+ for (i = 0; i < num_online_cpus(); i++)
+ if (lwt_cpus[i].lwtc_current_page != NULL)
+ return (-EALREADY);
- LASSERT (!lwt_enabled);
+	LASSERT(!lwt_enabled);
/* NULL pointers, zero scalars */
memset (lwt_cpus, 0, sizeof (lwt_cpus));
- lwt_pages_per_cpu =
- LWT_MEMORY / (cfs_num_online_cpus() * PAGE_CACHE_SIZE);
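+	/* split the fixed LWT_MEMORY budget evenly across online CPUs */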
+ lwt_pages_per_cpu = LWT_MEMORY / (num_online_cpus() * PAGE_CACHE_SIZE);
- for (i = 0; i < cfs_num_online_cpus(); i++)
+ for (i = 0; i < num_online_cpus(); i++)
for (j = 0; j < lwt_pages_per_cpu; j++) {
struct page *page = alloc_page(GFP_KERNEL);
lwt_page_t *lwtp;
return (0);
}
-void
-lwt_fini ()
+void lwt_fini(void)
{
- int i;
+ int i;
- lwt_control(0, 0);
+ lwt_control(0, 0);
- for (i = 0; i < cfs_num_online_cpus(); i++)
- while (lwt_cpus[i].lwtc_current_page != NULL) {
- lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
+ for (i = 0; i < num_online_cpus(); i++)
+ while (lwt_cpus[i].lwtc_current_page != NULL) {
+ lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
if (cfs_list_empty (&lwtp->lwtp_list)) {
lwt_cpus[i].lwtc_current_page = NULL;
static char source_nid[sizeof(tmp_utsname->nodename)];
static int source_pid;
-int cfs_smp_processor_id = 1;
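+/* object-like name: a function-like smp_processor_id() macro would not expand here */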
+int smp_processor_id = 1;
char libcfs_debug_file_path[1024];
FILE *debug_file_fd;
}
}
- tage->used = 0;
- tage->cpu = cfs_smp_processor_id();
- tage->type = tcd->tcd_type;
- cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
- tcd->tcd_cur_pages++;
+ tage->used = 0;
+ tage->cpu = smp_processor_id();
+ tage->type = tcd->tcd_type;
+ cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
+ tcd->tcd_cur_pages++;
if (tcd->tcd_cur_pages > 8 && thread_running) {
struct tracefiled_ctl *tctl = &trace_tctl;
void
cfs_trace_assertion_failed(const char *str,
- struct libcfs_debug_msg_data *msgdata)
+ struct libcfs_debug_msg_data *msgdata)
{
- struct ptldebug_header hdr;
+ struct ptldebug_header hdr;
- libcfs_panic_in_progress = 1;
- libcfs_catastrophe = 1;
- cfs_mb();
+ libcfs_panic_in_progress = 1;
+ libcfs_catastrophe = 1;
+ cfs_mb();
- cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
+ cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
- cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
- msgdata->msg_file, msgdata->msg_fn);
+ cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
+ msgdata->msg_file, msgdata->msg_fn);
- LIBCFS_PANIC("Lustre debug assertion failure\n");
+ panic("Lustre debug assertion failure\n");
- /* not reached */
+ /* not reached */
}
static void
int cfs_trace_set_debug_mb(int mb)
{
- int i;
- int j;
- int pages;
- int limit = cfs_trace_max_debug_mb();
- struct cfs_trace_cpu_data *tcd;
+ int i;
+ int j;
+ int pages;
+ int limit = cfs_trace_max_debug_mb();
+ struct cfs_trace_cpu_data *tcd;
- if (mb < cfs_num_possible_cpus()) {
- printk(CFS_KERN_WARNING
- "Lustre: %d MB is too small for debug buffer size, "
- "setting it to %d MB.\n", mb, cfs_num_possible_cpus());
- mb = cfs_num_possible_cpus();
- }
+ if (mb < num_possible_cpus()) {
+ printk(CFS_KERN_WARNING
+ "Lustre: %d MB is too small for debug buffer size, "
+ "setting it to %d MB.\n", mb, num_possible_cpus());
+ mb = num_possible_cpus();
+ }
- if (mb > limit) {
- printk(CFS_KERN_WARNING
- "Lustre: %d MB is too large for debug buffer size, "
- "setting it to %d MB.\n", mb, limit);
- mb = limit;
- }
+ if (mb > limit) {
+ printk(CFS_KERN_WARNING
+ "Lustre: %d MB is too large for debug buffer size, "
+ "setting it to %d MB.\n", mb, limit);
+ mb = limit;
+ }
- mb /= cfs_num_possible_cpus();
+ mb /= num_possible_cpus();
pages = mb << (20 - PAGE_CACHE_SHIFT);
- cfs_tracefile_write_lock();
+ cfs_tracefile_write_lock();
- cfs_tcd_for_each(tcd, i, j)
- tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
+ cfs_tcd_for_each(tcd, i, j)
+ tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
- cfs_tracefile_write_unlock();
+ cfs_tracefile_write_unlock();
- return 0;
+ return 0;
}
int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
if (!cfs_list_empty(&pc.pc_pages)) {
int i;
- printk(CFS_KERN_ALERT "Lustre: trace pages aren't "
- " empty\n");
- printk(CFS_KERN_ERR "total cpus(%d): ",
- cfs_num_possible_cpus());
- for (i = 0; i < cfs_num_possible_cpus(); i++)
- if (cpu_online(i))
- printk(CFS_KERN_ERR "%d(on) ", i);
- else
- printk(CFS_KERN_ERR "%d(off) ", i);
- printk(CFS_KERN_ERR "\n");
+		printk(CFS_KERN_ALERT "Lustre: trace pages aren't empty\n");
+ printk(CFS_KERN_ERR "total cpus(%d): ",
+ num_possible_cpus());
+ for (i = 0; i < num_possible_cpus(); i++)
+ if (cpu_online(i))
+ printk(CFS_KERN_ERR "%d(on) ", i);
+ else
+ printk(CFS_KERN_ERR "%d(off) ", i);
+ printk(CFS_KERN_ERR "\n");
i = 0;
cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
#define TCD_MAX_TYPES 8
extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
-#define cfs_tcd_for_each(tcd, i, j) \
- for (i = 0; cfs_trace_data[i] != NULL; i++) \
- for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
- j < cfs_num_possible_cpus(); \
- j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
+#define cfs_tcd_for_each(tcd, i, j) \
+ for (i = 0; cfs_trace_data[i] != NULL; i++) \
+ for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
+ j < num_possible_cpus(); \
+ j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
for (i = 0; cfs_trace_data[i] && \
extern char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
-static inline char *
-cfs_trace_get_console_buffer(void)
+static inline char *cfs_trace_get_console_buffer(void)
{
- unsigned int i = cfs_get_cpu();
- unsigned int j = cfs_trace_buf_idx_get();
+ unsigned int i = get_cpu();
+ unsigned int j = cfs_trace_buf_idx_get();
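+	/* preemption stays disabled until cfs_trace_put_console_buffer() calls put_cpu() */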
- return cfs_trace_console_buffers[i][j];
+ return cfs_trace_console_buffers[i][j];
}
static inline void
cfs_trace_put_console_buffer(char *buffer)
{
- cfs_put_cpu();
+ put_cpu();
}
-static inline struct cfs_trace_cpu_data *
-cfs_trace_get_tcd(void)
+static inline struct cfs_trace_cpu_data *cfs_trace_get_tcd(void)
{
struct cfs_trace_cpu_data *tcd =
- &(*cfs_trace_data[cfs_trace_buf_idx_get()])[cfs_get_cpu()].tcd;
+ &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
cfs_trace_lock_tcd(tcd, 0);
return tcd;
}
-static inline void
-cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
+static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
{
cfs_trace_unlock_tcd(tcd, 0);
-
- cfs_put_cpu();
+ put_cpu();
}
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
}
/* initialize engine threads list */
- ks_data.ksnd_engine_nums = cfs_num_online_cpus();
+ ks_data.ksnd_engine_nums = num_online_cpus();
if (ks_data.ksnd_engine_nums < 4) {
ks_data.ksnd_engine_nums = 4;
}
tcd->tcd_cpu = (USHORT)j;
}
- for (i = 0; i < cfs_num_possible_cpus(); i++)
+ for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
cfs_trace_console_buffers[i][j] =
kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
int i;
int j;
- for (i = 0; i < cfs_num_possible_cpus(); i++) {
+ for (i = 0; i < num_possible_cpus(); i++) {
for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
if (cfs_trace_console_buffers[i][j] != NULL) {
kfree(cfs_trace_console_buffers[i][j]);
header->ph_subsys = subsys;
header->ph_mask = mask;
- header->ph_cpu_id = cfs_smp_processor_id();
+ header->ph_cpu_id = smp_processor_id();
header->ph_type = cfs_trace_buf_idx_get();
header->ph_sec = (__u32)tv.tv_sec;
header->ph_usec = tv.tv_usec;
//#define PJK_DEBUGGING
#ifdef CONFIG_SMP
-# define PTLLND_N_SCHED cfs_num_online_cpus() /* # schedulers */
+# define PTLLND_N_SCHED num_online_cpus() /* # schedulers */
#else
# define PTLLND_N_SCHED 1 /* # schedulers */
#endif
/**********************************************************************/
/* Spawn scheduling threads */
- for (i = 0; i < cfs_num_online_cpus(); i++) {
+ for (i = 0; i < num_online_cpus(); i++) {
rc = kqswnal_thread_start(kqswnal_scheduler, NULL,
"kqswnal_sched");
if (rc != 0)
static inline int
ksocknal_nsched(void)
{
- return cfs_num_online_cpus();
+ return num_online_cpus();
}
static inline int
info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- LASSERT_MUTEX_LOCKED(&fld->lsf_lock);
+ LASSERT(mutex_is_locked(&fld->lsf_lock));
range = &info->fti_lrange;
memset(range, 0, sizeof(*range));
spin_lock(&stats->ls_lock);
return 0;
} else {
- unsigned int cpuid = cfs_get_cpu();
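+			/* get_cpu() disables preemption; the exit paths below pair it with put_cpu() */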
+ unsigned int cpuid = get_cpu();
if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
rc = lprocfs_stats_alloc_one(stats, cpuid);
if (rc < 0) {
- cfs_put_cpu();
+ put_cpu();
return rc;
}
}
spin_unlock(&stats->ls_lock);
}
} else {
- cfs_put_cpu();
+ put_cpu();
}
return;
#define OBD_LDLM_DEVICENAME "ldlm"
-#define LDLM_DEFAULT_LRU_SIZE (100 * cfs_num_online_cpus())
+#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
#define LDLM_CTIME_AGE_LIMIT (10)
#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
/** Check if resource is already locked, assert if not. */
static inline void check_res_locked(struct ldlm_resource *res)
{
- LASSERT_SPIN_LOCKED(&res->lr_lock);
+ LASSERT(spin_is_locked(&res->lr_lock));
}
struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
#define LDLM_THR_FACTOR 8
#define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT
#define LDLM_NTHRS_BASE 24
-#define LDLM_NTHRS_MAX (cfs_num_online_cpus() == 1 ? 64 : 128)
+#define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128)
#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
#define LDLM_CLIENT_NBUFS 1
ldlm_side_t client)
{
LASSERT(!cfs_list_empty(&ns->ns_list_chain));
- LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
+ LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}
ldlm_side_t client)
{
LASSERT(!cfs_list_empty(&ns->ns_list_chain));
- LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
+ LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
cfs_list_move_tail(&ns->ns_list_chain,
ldlm_namespace_inactive_list(client));
}
/** Should be called with ldlm_namespace_lock(client) taken. */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
- LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
- LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
- return container_of(ldlm_namespace_list(client)->next,
- struct ldlm_namespace, ns_list_chain);
+ LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
+ LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
+ return container_of(ldlm_namespace_list(client)->next,
+ struct ldlm_namespace, ns_list_chain);
}
/** Create and initialize new resource. */
* In fact, page cannot be locked here at all, because
* ll_dir_filler() does synchronous io.
*/
- wait_on_page(page);
- if (PageUptodate(page)) {
+ wait_on_page_locked(page);
+ if (PageUptodate(page)) {
dp = kmap(page);
if (BITS_PER_LONG == 32 && hash64) {
*start = le64_to_cpu(dp->ldp_hash_start) >> 32;
GOTO(out_unlock, page);
}
- wait_on_page(page);
- (void)kmap(page);
- if (!PageUptodate(page)) {
- CERROR("page not updated: "DFID" at "LPU64": rc %d\n",
- PFID(ll_inode2fid(dir)), hash, -5);
- goto fail;
- }
+ wait_on_page_locked(page);
+ (void)kmap(page);
+ if (!PageUptodate(page)) {
+ CERROR("page not updated: "DFID" at "LPU64": rc %d\n",
+ PFID(ll_inode2fid(dir)), hash, -5);
+ goto fail;
+ }
if (!PageChecked(page))
ll_check_page(dir, page);
if (PageError(page)) {
static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
{
- LASSERT_SEM_LOCKED(&ll_i2info(inode)->lli_size_sem);
- i_size_write(inode, kms);
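+	/* non-zero down_trylock() means lli_size_sem is already held */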
+ LASSERT(down_trylock(&ll_i2info(inode)->lli_size_sem) != 0);
+ i_size_write(inode, kms);
}
static inline void cl_isize_write(struct inode *inode, loff_t kms)
int i;
int rc = 0;
- LASSERT_SPIN_LOCKED(&lsm->lsm_lock);
+ LASSERT(spin_is_locked(&lsm->lsm_lock));
#ifdef __KERNEL__
LASSERT(lsm->lsm_lock_owner == cfs_curproc_pid());
#endif
__u64 kms;
ENTRY;
- LASSERT_SPIN_LOCKED(&lsm->lsm_lock);
+ LASSERT(spin_is_locked(&lsm->lsm_lock));
#ifdef __KERNEL__
- LASSERT(lsm->lsm_lock_owner == cfs_curproc_pid());
+ LASSERT(lsm->lsm_lock_owner == cfs_curproc_pid());
#endif
if (shrink) {
ENTRY;
- head = cl_object_header(obj);
- LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
+ head = cl_object_header(obj);
+ LINVRNT(spin_is_locked(&head->coh_lock_guard));
CS_LOCK_INC(obj, lookup);
- cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
- int matched;
+ cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+ int matched;
matched = cl_lock_ext_match(&lock->cll_descr, need) &&
lock->cll_state < CLS_FREEING &&
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr)
{
- struct lu_object_header *top;
- int result;
+ struct lu_object_header *top;
+ int result;
- LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
- ENTRY;
+ LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+ ENTRY;
top = obj->co_lu.lo_header;
result = 0;
int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_attr *attr, unsigned v)
{
- struct lu_object_header *top;
- int result;
+ struct lu_object_header *top;
+ int result;
- LASSERT_SPIN_LOCKED(cl_object_attr_guard(obj));
- ENTRY;
+ LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+ ENTRY;
top = obj->co_lu.lo_header;
result = 0;
*/
struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
{
- struct cl_page *page;
+ struct cl_page *page;
- LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
+ LASSERT(spin_is_locked(&hdr->coh_page_guard));
- page = radix_tree_lookup(&hdr->coh_tree, index);
- if (page != NULL)
- cl_page_get_trust(page);
- return page;
+ page = radix_tree_lookup(&hdr->coh_tree, index);
+ if (page != NULL)
+ cl_page_get_trust(page);
+ return page;
}
EXPORT_SYMBOL(cl_page_lookup);
if (flags & LPROCFS_STATS_FLAG_NOPERCPU)
num_entry = 1;
else
- num_entry = cfs_num_possible_cpus();
+ num_entry = num_possible_cpus();
/* alloc percpu pointers for all possible cpu slots */
LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
num_entry = 1;
else
- num_entry = cfs_num_possible_cpus();
+ num_entry = num_possible_cpus();
percpusize = lprocfs_stats_counter_size(stats);
for (i = 0; i < num_entry; i++)
{
struct filter_export_data *fed = &exp->exp_filter_data;
- LASSERT_SPIN_LOCKED(&fed->fed_lock);
+ LASSERT(spin_is_locked(&fed->fed_lock));
if (--fmd->fmd_refcount == 0) {
/* XXX when we have persistent reservations and the handle
* is stored herein we need to drop it here. */
cfs_time_t now = cfs_time_current();
- LASSERT_SPIN_LOCKED(&fed->fed_lock);
+ LASSERT(spin_is_locked(&fed->fed_lock));
cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
if (lu_fid_eq(&fmd->fmd_fid, fid)) {
obd_size unstable;
ENTRY;
- LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
+ LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
spin_lock(&ofd->ofd_osfs_lock);
/* get available space from cached statfs data */
long dirty, dropped, grant_chunk;
ENTRY;
- LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
+ LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
(OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
struct obd_device *obd = exp->exp_obd;
long grant_shrink;
- LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
+ LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
LASSERT(exp);
if (left_space >= ofd->ofd_tot_granted_clients *
OFD_GRANT_SHRINK_LIMIT(exp))
ENTRY;
- LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
+ LASSERT(spin_is_locked(&ofd->ofd_grant_lock));
if ((oa->o_valid & OBD_MD_FLFLAGS) &&
(oa->o_flags & OBD_FL_RECOV_RESEND)) {
ENTRY;
LASSERT(res);
- LASSERT_MUTEX_LOCKED(&res->lr_lvb_mutex);
+ LASSERT(mutex_is_locked(&res->lr_lvb_mutex));
if (res->lr_lvb_data != NULL)
RETURN(0);
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
+ LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
cfs_atomic_inc(&obd_dirty_pages);
cli->cl_dirty += PAGE_CACHE_SIZE;
{
ENTRY;
- LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
+ LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
EXIT;
return;
struct buffer_head *bh;
int err;
- LASSERT_SEM_LOCKED(&c->ic_idle_sem);
+ LASSERT(down_trylock(&c->ic_idle_sem) != 0);
if (blk == 0)
return NULL;
*/
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
- LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
- (void)__ptlrpc_req_finished(request, 1);
+ LASSERT(spin_is_locked(&request->rq_import->imp_lock));
+ (void)__ptlrpc_req_finished(request, 1);
}
EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
ENTRY;
- LASSERT(imp != NULL);
-
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ LASSERT(imp != NULL);
+ LASSERT(spin_is_locked(&imp->imp_lock));
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
struct obd_import *imp)
{
- cfs_list_t *tmp;
+ cfs_list_t *tmp;
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ LASSERT(spin_is_locked(&imp->imp_lock));
if (req->rq_transno == 0) {
DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
static
void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
{
- LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
- __cfs_hlist_del(&ctx->cc_cache);
- cfs_hlist_add_head(&ctx->cc_cache, freelist);
- } else {
- cfs_hlist_del_init(&ctx->cc_cache);
- }
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
+ __cfs_hlist_del(&ctx->cc_cache);
+ cfs_hlist_add_head(&ctx->cc_cache, freelist);
+ } else {
+ cfs_hlist_del_init(&ctx->cc_cache);
+ }
}
/*
static
void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
{
- __u32 idx = gmsg->gum_mechidx;
+ __u32 idx = gmsg->gum_mechidx;
- LASSERT(idx < MECH_MAX);
- LASSERT_SPIN_LOCKED(&upcall_locks[idx]);
+ LASSERT(idx < MECH_MAX);
+ LASSERT(spin_is_locked(&upcall_locks[idx]));
- if (cfs_list_empty(&gmsg->gum_list))
- return;
+ if (cfs_list_empty(&gmsg->gum_list))
+ return;
- cfs_list_del_init(&gmsg->gum_list);
- LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
- cfs_atomic_dec(&gmsg->gum_refcount);
+ cfs_list_del_init(&gmsg->gum_list);
+ LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
+ cfs_atomic_dec(&gmsg->gum_refcount);
}
static
static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
{
ENTRY;
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ LASSERT(spin_is_locked(&imp->imp_lock));
CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
imp->imp_invalid = 1;
void ptlrpc_pinger_commit_expected(struct obd_import *imp)
{
ptlrpc_update_next_ping(imp, 1);
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ LASSERT(spin_is_locked(&imp->imp_lock));
/*
* Avoid reading stale imp_connect_data. When not sure if pings are
* expected or not on next connection, we assume they are not and force
ptlrpc_pinger_register_timeout(int time, enum timeout_event event,
timeout_cb_t cb, void *data)
{
- struct timeout_item *item, *tmp;
+ struct timeout_item *item, *tmp;
- LASSERT_MUTEX_LOCKED(&pinger_mutex);
+ LASSERT(mutex_is_locked(&pinger_mutex));
- cfs_list_for_each_entry(item, &timeout_list, ti_chain)
- if (item->ti_event == event)
- goto out;
+ cfs_list_for_each_entry(item, &timeout_list, ti_chain)
+ if (item->ti_event == event)
+ goto out;
item = ptlrpc_new_timeout(time, event, cb, data);
if (item) {
return &ptlrpcds->pd_thread_rcv;
#ifdef __KERNEL__
- switch (policy) {
- case PDL_POLICY_SAME:
- idx = cfs_smp_processor_id() % ptlrpcds->pd_nthreads;
- break;
+ switch (policy) {
+ case PDL_POLICY_SAME:
+ idx = smp_processor_id() % ptlrpcds->pd_nthreads;
+ break;
case PDL_POLICY_LOCAL:
		/* Before CPU partition patches are available, process it the same
* as "PDL_POLICY_ROUND". */
* CPU partition patches are available. */
index = -1;
case PDL_POLICY_PREFERRED:
- if (index >= 0 && index < cfs_num_online_cpus()) {
+ if (index >= 0 && index < num_online_cpus()) {
idx = index % ptlrpcds->pd_nthreads;
break;
}
case PDL_POLICY_ROUND:
/* We do not care whether it is strict load balance. */
idx = ptlrpcds->pd_index + 1;
- if (idx == cfs_smp_processor_id())
+ if (idx == smp_processor_id())
idx++;
idx %= ptlrpcds->pd_nthreads;
ptlrpcds->pd_index = idx;
if (test_bit(LIOD_BIND, &pc->pc_flags)) {
int index = pc->pc_index;
- if (index >= 0 && index < cfs_num_possible_cpus()) {
+ if (index >= 0 && index < num_possible_cpus()) {
while (!cpu_online(index)) {
- if (++index >= cfs_num_possible_cpus())
+ if (++index >= num_possible_cpus())
index = 0;
}
set_cpus_allowed_ptr(cfs_current(),
{
int i;
mask = *cpumask_of_node(cpu_to_node(index));
- for (i = max; i < cfs_num_online_cpus(); i++)
+ for (i = max; i < num_online_cpus(); i++)
cpu_clear(i, mask);
pc->pc_npartners = cpus_weight(mask) - 1;
set_bit(LIOD_BIND, &pc->pc_flags);
static int ptlrpcd_init(void)
{
- int nthreads = cfs_num_online_cpus();
- char name[16];
- int size, i = -1, j, rc = 0;
- ENTRY;
+ int nthreads = num_online_cpus();
+ char name[16];
+ int size, i = -1, j, rc = 0;
+ ENTRY;
#ifdef __KERNEL__
if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
static inline void enc_pools_wakeup(void)
{
- LASSERT_SPIN_LOCKED(&page_pools.epp_lock);
- LASSERT(page_pools.epp_waitqlen >= 0);
+ LASSERT(spin_is_locked(&page_pools.epp_lock));
+ LASSERT(page_pools.epp_waitqlen >= 0);
- if (unlikely(page_pools.epp_waitqlen)) {
- LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
- cfs_waitq_broadcast(&page_pools.epp_waitq);
- }
+ if (unlikely(page_pools.epp_waitqlen)) {
+ LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
+ cfs_waitq_broadcast(&page_pools.epp_waitq);
+ }
}
static int enc_pools_should_grow(int page_needed, long now)
{
ENTRY;
- LASSERT_SPIN_LOCKED(&rs->rs_svcpt->scp_rep_lock);
- LASSERT_SPIN_LOCKED(&rs->rs_lock);
- LASSERT (rs->rs_difficult);
- rs->rs_scheduled_ever = 1; /* flag any notification attempt */
+ LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
+ LASSERT(spin_is_locked(&rs->rs_lock));
+	LASSERT(rs->rs_difficult);
+ rs->rs_scheduled_ever = 1; /* flag any notification attempt */
- if (rs->rs_scheduled) { /* being set up or already notified */
- EXIT;
- return;
- }
+ if (rs->rs_scheduled) { /* being set up or already notified */
+ EXIT;
+ return;
+ }
- rs->rs_scheduled = 1;
- cfs_list_del_init(&rs->rs_list);
- ptlrpc_dispatch_difficult_reply(rs);
- EXIT;
+ rs->rs_scheduled = 1;
+ cfs_list_del_init(&rs->rs_list);
+ ptlrpc_dispatch_difficult_reply(rs);
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);