/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
int cfs_tracefile_init_arch()
{
int j;
struct cfs_trace_cpu_data *tcd;
- cfs_init_rwsem(&cfs_tracefile_sem);
+ init_rwsem(&cfs_tracefile_sem);
/* initialize trace_data */
memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
cfs_trace_data[i] =
- kmalloc(sizeof(union cfs_trace_data_union) * NR_CPUS,
- GFP_KERNEL);
+ kmalloc(sizeof(union cfs_trace_data_union) *
+ num_possible_cpus(), GFP_KERNEL);
if (cfs_trace_data[i] == NULL)
goto out;
/* arch related info initialized */
cfs_tcd_for_each(tcd, i, j) {
- cfs_spin_lock_init(&tcd->tcd_lock);
+ spin_lock_init(&tcd->tcd_lock);
tcd->tcd_pages_factor = pages_factor[i];
tcd->tcd_type = i;
tcd->tcd_cpu = j;
for (i = 0; i < num_possible_cpus(); i++)
for (j = 0; j < 3; j++) {
- cfs_trace_console_buffers[i][j] =
- kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
- GFP_KERNEL);
+ cfs_trace_console_buffers[i][j] =
+ kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
+ GFP_KERNEL);
if (cfs_trace_console_buffers[i][j] == NULL)
goto out;
cfs_tracefile_fini_arch();
printk(KERN_ERR "lnet: Not enough memory\n");
return -ENOMEM;
-
}
void cfs_tracefile_fini_arch()
cfs_trace_data[i] = NULL;
}
- cfs_fini_rwsem(&cfs_tracefile_sem);
+ fini_rwsem(&cfs_tracefile_sem);
}
/* Take cfs_tracefile_sem for shared (read) access. */
void cfs_tracefile_read_lock()
{
	down_read(&cfs_tracefile_sem);
}
/* Release the shared (read) hold on cfs_tracefile_sem. */
void cfs_tracefile_read_unlock()
{
	up_read(&cfs_tracefile_sem);
}
/* Take cfs_tracefile_sem for exclusive (write) access. */
void cfs_tracefile_write_lock()
{
	down_write(&cfs_tracefile_sem);
}
/* Release the exclusive (write) hold on cfs_tracefile_sem. */
void cfs_tracefile_write_unlock()
{
	up_write(&cfs_tracefile_sem);
}
cfs_trace_buf_type_t cfs_trace_buf_idx_get()
* for details.
*/
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
+__acquires(&tcd->tcd_lock)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- cfs_spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- cfs_spin_lock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- cfs_spin_lock_irq(&tcd->tcd_lock);
- else
- cfs_spin_lock(&tcd->tcd_lock);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ spin_lock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ spin_lock_irq(&tcd->tcd_lock);
+ else
+ spin_lock(&tcd->tcd_lock);
return 1;
}
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
+__releases(&tcd->tcd_lock)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- cfs_spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- cfs_spin_unlock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- cfs_spin_unlock_irq(&tcd->tcd_lock);
- else
- cfs_spin_unlock(&tcd->tcd_lock);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ spin_unlock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ spin_unlock_irq(&tcd->tcd_lock);
+ else
+ spin_unlock(&tcd->tcd_lock);
}
int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
header->ph_subsys = msgdata->msg_subsys;
header->ph_mask = msgdata->msg_mask;
- header->ph_cpu_id = cfs_smp_processor_id();
+ header->ph_cpu_id = smp_processor_id();
header->ph_type = cfs_trace_buf_idx_get();
header->ph_sec = (__u32)tv.tv_sec;
header->ph_usec = tv.tv_usec;
int cfs_trace_max_debug_mb(void)
{
- int total_mb = (cfs_num_physpages >> (20 - PAGE_SHIFT));
+ int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}