Diffstat (limited to 'drivers/hwtracing/coresight/coresight-etm-perf.c')
-rw-r--r-- | drivers/hwtracing/coresight/coresight-etm-perf.c | 27 |
1 file changed, 11 insertions, 16 deletions
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index ad6a8f4b70b6..f4cccd68e625 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -136,13 +136,13 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
 	NULL,
 };
 
-static inline struct list_head **
+static inline struct coresight_path **
 etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
 {
 	return per_cpu_ptr(data->path, cpu);
 }
 
-static inline struct list_head *
+static inline struct coresight_path *
 etm_event_cpu_path(struct etm_event_data *data, int cpu)
 {
 	return *etm_event_cpu_path_ptr(data, cpu);
@@ -226,7 +226,7 @@ static void free_event_data(struct work_struct *work)
 		cscfg_deactivate_config(event_data->cfg_hash);
 
 	for_each_cpu(cpu, mask) {
-		struct list_head **ppath;
+		struct coresight_path **ppath;
 
 		ppath = etm_event_cpu_path_ptr(event_data, cpu);
 		if (!(IS_ERR_OR_NULL(*ppath))) {
@@ -276,7 +276,7 @@ static void *alloc_event_data(int cpu)
 	 * unused memory when dealing with single CPU trace scenarios is small
 	 * compared to the cost of searching through an optimized array.
 	 */
-	event_data->path = alloc_percpu(struct list_head *);
+	event_data->path = alloc_percpu(struct coresight_path *);
 
 	if (!event_data->path) {
 		kfree(event_data);
@@ -317,7 +317,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
 {
 	u32 id, cfg_hash;
 	int cpu = event->cpu;
-	int trace_id;
 	cpumask_t *mask;
 	struct coresight_device *sink = NULL;
 	struct coresight_device *user_sink = NULL, *last_sink = NULL;
@@ -352,7 +351,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
 	 * CPUs, we can handle it and fail the session.
 	 */
 	for_each_cpu(cpu, mask) {
-		struct list_head *path;
+		struct coresight_path *path;
 		struct coresight_device *csdev;
 
 		csdev = per_cpu(csdev_src, cpu);
@@ -407,8 +406,8 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
 		}
 
 		/* ensure we can allocate a trace ID for this CPU */
-		trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
-		if (!IS_VALID_CS_TRACE_ID(trace_id)) {
+		coresight_path_assign_trace_id(path, CS_MODE_PERF);
+		if (!IS_VALID_CS_TRACE_ID(path->trace_id)) {
 			cpumask_clear_cpu(cpu, mask);
 			coresight_release_path(path);
 			continue;
@@ -458,9 +457,8 @@ static void etm_event_start(struct perf_event *event, int flags)
 	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
 	struct perf_output_handle *handle = &ctxt->handle;
 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
-	struct list_head *path;
+	struct coresight_path *path;
 	u64 hw_id;
-	u8 trace_id;
 
 	if (!csdev)
 		goto fail;
@@ -503,8 +501,7 @@ static void etm_event_start(struct perf_event *event, int flags)
 		goto fail_end_stop;
 
 	/* Finally enable the tracer */
-	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF,
-				       &sink->perf_sink_id_map))
+	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF, path))
 		goto fail_disable_path;
 
 	/*
@@ -514,13 +511,11 @@ static void etm_event_start(struct perf_event *event, int flags)
 	if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
 		cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
 
-		trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map);
-
 		hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK,
 				   CS_AUX_HW_ID_MAJOR_VERSION);
 		hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK,
 				    CS_AUX_HW_ID_MINOR_VERSION);
-		hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);
+		hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, path->trace_id);
 		hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink));
 
 		perf_report_aux_output_id(event, hw_id);
@@ -558,7 +553,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
 	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
 	struct perf_output_handle *handle = &ctxt->handle;
 	struct etm_event_data *event_data;
-	struct list_head *path;
+	struct coresight_path *path;
 
 	/*
	 * If we still have access to the event_data via handle,
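The change above moves trace-ID ownership from the sink's perf_sink_id_map into the path itself: coresight_path_assign_trace_id() stamps the ID on the path at setup time, and both the source enable() call and the AUX_OUTPUT_HW_ID record then read path->trace_id. A stand-alone sketch of that ownership pattern follows, written as plain user-space C rather than kernel code; every name in it except coresight_path and trace_id is illustrative, not the kernel's actual definition.

/*
 * Illustrative sketch only: models the "ID travels with the path" pattern
 * shown in the diff. Not the CoreSight implementation.
 */
#include <stdio.h>
#include <stdlib.h>

struct coresight_path_sketch {		/* stand-in for struct coresight_path */
	int trace_id;			/* the ID now lives on the path */
	/* ... devices making up the path would live here ... */
};

/* stand-in for coresight_path_assign_trace_id(): bind an ID to the path
 * once, at session-setup time. */
static void assign_trace_id(struct coresight_path_sketch *path, int free_id)
{
	path->trace_id = free_id;
}

/* stand-in for a source driver's enable() callback: with the new signature
 * it receives the path and reads the ID from it directly, instead of
 * consulting the sink's ID map. */
static void enable_source(const struct coresight_path_sketch *path)
{
	printf("enabling source with trace ID %d\n", path->trace_id);
}

int main(void)
{
	struct coresight_path_sketch *path = malloc(sizeof(*path));

	if (!path)
		return 1;
	assign_trace_id(path, 0x10);	/* setup: ID bound to the path */
	enable_source(path);		/* start: consumers just read it back */
	free(path);
	return 0;
}

With the ID carried by the path, per-CPU consumers such as etm_event_start() no longer need the sink's perf_sink_id_map just to recover their trace ID, which is why the temporary trace_id locals disappear in the diff.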