return CFS_TCD_TYPE_PROC;
}
-int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd)
+/*
+ * The walking argument indicates the locking comes from the all-tcd-types
+ * iterator and we must lock it and disable local irqs to avoid deadlocks
+ * with other interrupt locks that might be happening. See LU-1311
+ * for details.
+ */
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
cfs_spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
cfs_spin_lock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ cfs_spin_lock_irq(&tcd->tcd_lock);
else
cfs_spin_lock(&tcd->tcd_lock);
return 1;
}
-void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd)
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
cfs_spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
cfs_spin_unlock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ cfs_spin_unlock_irq(&tcd->tcd_lock);
else
cfs_spin_unlock(&tcd->tcd_lock);
}
#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
for (i = 0; cfs_trace_data[i] && \
(tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
- cfs_trace_lock_tcd(tcd); cfs_trace_unlock_tcd(tcd), i++)
+ cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
const char *buf, int len, const char *file,
const char *fn);
-extern int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd);
-extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd);
+extern int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
+extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
/**
* trace_buf_type_t, trace_buf_idx_get() and trace_console_buffers[][]
struct cfs_trace_cpu_data *tcd =
&(*cfs_trace_data[cfs_trace_buf_idx_get()])[cfs_get_cpu()].tcd;
- cfs_trace_lock_tcd(tcd);
+ cfs_trace_lock_tcd(tcd, 0);
return tcd;
}
static inline void
cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
{
- cfs_trace_unlock_tcd(tcd);
+ cfs_trace_unlock_tcd(tcd, 0);
cfs_put_cpu();
}
return CFS_TCD_TYPE_PASSIVE;
}
-int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd)
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
return 1;
}
-void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd)
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
}