perf/marvell: Refactor to extract PMU operations

Refactor the Marvell DDR PMU driver to extract the hardware-specific
PMU operations ("pmu ops") out of the common driver code.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@Huawei.com>
Signed-off-by: Gowthami Thiagarajan <gthiagarajan@marvell.com>
Link: https://lore.kernel.org/r/20241108040619.753343-3-gthiagarajan@marvell.com
Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
Gowthami Thiagarajan
2024-11-08 09:36:16 +05:30
committed by Will Deacon
parent 349f77e109
commit 0045de7e87
+83 -22
View File
@@ -127,6 +127,7 @@ struct cn10k_ddr_pmu {
struct pmu pmu;
void __iomem *base;
const struct ddr_pmu_platform_data *p_data;
const struct ddr_pmu_ops *ops;
unsigned int cpu;
struct device *dev;
int active_events;
@@ -135,6 +136,16 @@ struct cn10k_ddr_pmu {
struct hlist_node node;
};
/*
 * Hardware-specific PMU operations, installed into cn10k_ddr_pmu::ops at
 * probe time so the common driver paths can call variant-specific code
 * through a single interface.
 */
struct ddr_pmu_ops {
/* Enable (true) or disable (false) the free-running read counter. */
void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
bool enable);
/* Enable (true) or disable (false) the free-running write counter. */
void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
bool enable);
/* Reset the free-running read counter to zero. */
void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
/* Reset the free-running write counter to zero. */
void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
/* Handle overflow of programmable event counter evt_idx. */
void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
};
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
struct ddr_pmu_platform_data {
@@ -375,6 +386,7 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
int counter, bool enable)
{
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
const struct ddr_pmu_ops *ops = pmu->ops;
u32 reg;
u64 val;
@@ -394,21 +406,10 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
writeq_relaxed(val, pmu->base + reg);
} else {
val = readq_relaxed(pmu->base +
p_data->cnt_freerun_en);
if (enable) {
if (counter == DDRC_PERF_READ_COUNTER_IDX)
val |= DDRC_PERF_FREERUN_READ_EN;
else
val |= DDRC_PERF_FREERUN_WRITE_EN;
} else {
if (counter == DDRC_PERF_READ_COUNTER_IDX)
val &= ~DDRC_PERF_FREERUN_READ_EN;
else
val &= ~DDRC_PERF_FREERUN_WRITE_EN;
}
writeq_relaxed(val, pmu->base +
p_data->cnt_freerun_en);
if (counter == DDRC_PERF_READ_COUNTER_IDX)
ops->enable_read_freerun_counter(pmu, enable);
else
ops->enable_write_freerun_counter(pmu, enable);
}
}
@@ -464,6 +465,7 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
const struct ddr_pmu_ops *ops = pmu->ops;
struct hw_perf_event *hwc = &event->hw;
u8 config = event->attr.config;
int counter, ret;
@@ -492,11 +494,9 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
} else {
/* fixed event counter, clear counter value */
if (counter == DDRC_PERF_READ_COUNTER_IDX)
val = DDRC_FREERUN_READ_CNT_CLR;
ops->clear_read_freerun_counter(pmu);
else
val = DDRC_FREERUN_WRITE_CNT_CLR;
writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
ops->clear_write_freerun_counter(pmu);
}
hwc->state |= PERF_HES_STOPPED;
@@ -578,9 +578,63 @@ static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
}
}
/*
 * Enable or disable the free-running read counter by toggling
 * DDRC_PERF_FREERUN_READ_EN in the counter free-run enable register.
 */
static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
{
	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
	u64 bits;

	/* Read-modify-write: only the READ_EN bit changes. */
	bits = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
	bits = enable ? (bits | DDRC_PERF_FREERUN_READ_EN)
		      : (bits & ~DDRC_PERF_FREERUN_READ_EN);
	writeq_relaxed(bits, pmu->base + p_data->cnt_freerun_en);
}
/*
 * Enable or disable the free-running write counter by toggling
 * DDRC_PERF_FREERUN_WRITE_EN in the counter free-run enable register.
 */
static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
{
	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
	u64 bits;

	/* Read-modify-write: only the WRITE_EN bit changes. */
	bits = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
	bits = enable ? (bits | DDRC_PERF_FREERUN_WRITE_EN)
		      : (bits & ~DDRC_PERF_FREERUN_WRITE_EN);
	writeq_relaxed(bits, pmu->base + p_data->cnt_freerun_en);
}
static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
{
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
u64 val;
val = DDRC_FREERUN_READ_CNT_CLR;
writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
}
static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
{
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
u64 val;
val = DDRC_FREERUN_WRITE_CNT_CLR;
writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
}
/*
 * Overflow handling for a programmable event counter: fold all current
 * counter values into the perf events, then bounce the whole PMU
 * (disable + enable) to restart counting.
 *
 * NOTE(review): evt_idx is unused here; the signature matches struct
 * ddr_pmu_ops::pmu_overflow_handler. Also, "hander" looks like a typo
 * for "handler" — renaming would need the ops table updated too.
 */
static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
{
cn10k_ddr_perf_event_update_all(pmu);
cn10k_ddr_perf_pmu_disable(&pmu->pmu);
cn10k_ddr_perf_pmu_enable(&pmu->pmu);
}
static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
{
const struct ddr_pmu_platform_data *p_data = pmu->p_data;
const struct ddr_pmu_ops *ops = pmu->ops;
struct perf_event *event;
struct hw_perf_event *hwc;
u64 prev_count, new_count;
@@ -620,9 +674,7 @@ static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
value = cn10k_ddr_perf_read_counter(pmu, i);
if (value == p_data->counter_max_val) {
pr_info("Counter-(%d) reached max value\n", i);
cn10k_ddr_perf_event_update_all(pmu);
cn10k_ddr_perf_pmu_disable(&pmu->pmu);
cn10k_ddr_perf_pmu_enable(&pmu->pmu);
ops->pmu_overflow_handler(pmu, i);
}
}
@@ -661,6 +713,14 @@ static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
/* PMU ops for the CN10K DDR controller (installed in the probe path). */
static const struct ddr_pmu_ops ddr_pmu_ops = {
.enable_read_freerun_counter = ddr_pmu_enable_read_freerun,
.enable_write_freerun_counter = ddr_pmu_enable_write_freerun,
.clear_read_freerun_counter = ddr_pmu_read_clear_freerun,
.clear_write_freerun_counter = ddr_pmu_write_clear_freerun,
.pmu_overflow_handler = ddr_pmu_overflow_hander,
};
#if defined(CONFIG_ACPI) || defined(CONFIG_OF)
static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
.counter_overflow_val = BIT_ULL(48),
@@ -713,6 +773,7 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
is_cn10k = ddr_pmu->p_data->is_cn10k;
if (is_cn10k) {
ddr_pmu->ops = &ddr_pmu_ops;
/* Setup the PMU counter to work in manual mode */
writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base +
ddr_pmu->p_data->cnt_op_mode_ctrl);