path: root/arch/x86/events/intel/lbr.c
author		Kan Liang <kan.liang@linux.intel.com>	2020-07-03 05:49:12 -0700
committer	Peter Zijlstra <peterz@infradead.org>	2020-07-08 11:38:52 +0200
commit		f42be8651a7a9d5cb165e5d176fc0b09621b4f4d (patch)
tree		34920f9367f679edb7efc4e00637633b69b54444 /arch/x86/events/intel/lbr.c
parent		530bfff6480307d210734222a54d56af7f908957 (diff)
download	linux-stable-f42be8651a7a9d5cb165e5d176fc0b09621b4f4d.tar.gz
		linux-stable-f42be8651a7a9d5cb165e5d176fc0b09621b4f4d.tar.bz2
		linux-stable-f42be8651a7a9d5cb165e5d176fc0b09621b4f4d.zip
perf/x86/intel/lbr: Use dynamic data structure for task_ctx
The type of task_ctx is hardcoded as struct x86_perf_task_context, which doesn't apply to Architecture LBR. For example, Architecture LBR doesn't have the TOS MSR, and its number of LBR entries is variable. A new struct will be introduced for Architecture LBR, so perf has to determine the type of task_ctx at run time.

The type of the task_ctx pointer is changed to 'void *', whose concrete type is determined at run time.

The generic LBR optimization can be shared between Architecture LBR and model-specific LBR, and both need to access the structure that carries it. A helper, task_context_opt(), is introduced to retrieve a pointer to that structure at run time.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1593780569-62993-7-git-send-email-kan.liang@linux.intel.com
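For orientation, here is a minimal sketch of what task_context_opt() could look like for the model-specific layout, matching the '->opt' accesses replaced in the hunks below. The helper's definition is not part of this file's diff, so the returned struct name (x86_perf_task_context_opt) and the body are assumptions inferred from the description above:

  /*
   * Sketch only: return the generic LBR optimization data embedded in
   * an opaque task_ctx. For model-specific LBR it sits at '->opt' of
   * struct x86_perf_task_context, as the hunks below show; an
   * Architecture LBR context type would cast to its own struct instead.
   */
  static inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
  {
  	return &((struct x86_perf_task_context *)ctx)->opt;
  }

Because every caller goes through this one helper, only the helper has to learn about a new context layout; the call sites in the diff below stay layout-agnostic.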
Diffstat (limited to 'arch/x86/events/intel/lbr.c')
-rw-r--r--	arch/x86/events/intel/lbr.c	59
1 file changed, 26 insertions(+), 33 deletions(-)
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index bba9939635b6..e62baa996474 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -355,18 +355,17 @@ void intel_pmu_lbr_restore(void *ctx)
wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
}
-static __always_inline bool
-lbr_is_reset_in_cstate(struct x86_perf_task_context *task_ctx)
+static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
{
- return !rdlbr_from(task_ctx->tos);
+ return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos);
}
-static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
+static void __intel_pmu_lbr_restore(void *ctx)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (task_ctx->opt.lbr_callstack_users == 0 ||
- task_ctx->opt.lbr_stack_state == LBR_NONE) {
+ if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
+ task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
intel_pmu_lbr_reset();
return;
}
@@ -376,16 +375,16 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
* - No one else touched them, and
* - Was not cleared in Cstate
*/
- if ((task_ctx == cpuc->last_task_ctx) &&
- (task_ctx->opt.log_id == cpuc->last_log_id) &&
- !lbr_is_reset_in_cstate(task_ctx)) {
- task_ctx->opt.lbr_stack_state = LBR_NONE;
+ if ((ctx == cpuc->last_task_ctx) &&
+ (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
+ !lbr_is_reset_in_cstate(ctx)) {
+ task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
return;
}
- x86_pmu.lbr_restore(task_ctx);
+ x86_pmu.lbr_restore(ctx);
- task_ctx->opt.lbr_stack_state = LBR_NONE;
+ task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
}
void intel_pmu_lbr_save(void *ctx)
@@ -415,27 +414,27 @@ void intel_pmu_lbr_save(void *ctx)
rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
}
-static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
+static void __intel_pmu_lbr_save(void *ctx)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (task_ctx->opt.lbr_callstack_users == 0) {
- task_ctx->opt.lbr_stack_state = LBR_NONE;
+ if (task_context_opt(ctx)->lbr_callstack_users == 0) {
+ task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
return;
}
- x86_pmu.lbr_save(task_ctx);
+ x86_pmu.lbr_save(ctx);
- task_ctx->opt.lbr_stack_state = LBR_VALID;
+ task_context_opt(ctx)->lbr_stack_state = LBR_VALID;
- cpuc->last_task_ctx = task_ctx;
- cpuc->last_log_id = ++task_ctx->opt.log_id;
+ cpuc->last_task_ctx = ctx;
+ cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
}
void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
struct perf_event_context *next)
{
- struct x86_perf_task_context *prev_ctx_data, *next_ctx_data;
+ void *prev_ctx_data, *next_ctx_data;
swap(prev->task_ctx_data, next->task_ctx_data);
@@ -451,14 +450,14 @@ void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
if (!prev_ctx_data || !next_ctx_data)
return;
- swap(prev_ctx_data->opt.lbr_callstack_users,
- next_ctx_data->opt.lbr_callstack_users);
+ swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
+ task_context_opt(next_ctx_data)->lbr_callstack_users);
}
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct x86_perf_task_context *task_ctx;
+ void *task_ctx;
if (!cpuc->lbr_users)
return;
@@ -495,7 +494,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
void intel_pmu_lbr_add(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
@@ -505,10 +503,8 @@ void intel_pmu_lbr_add(struct perf_event *event)
cpuc->br_sel = event->hw.branch_reg.reg;
- if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
- task_ctx = event->ctx->task_ctx_data;
- task_ctx->opt.lbr_callstack_users++;
- }
+ if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
+ task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
/*
* Request pmu::sched_task() callback, which will fire inside the
@@ -539,16 +535,13 @@ void intel_pmu_lbr_add(struct perf_event *event)
void intel_pmu_lbr_del(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
if (branch_user_callstack(cpuc->br_sel) &&
- event->ctx->task_ctx_data) {
- task_ctx = event->ctx->task_ctx_data;
- task_ctx->opt.lbr_callstack_users--;
- }
+ event->ctx->task_ctx_data)
+ task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
cpuc->lbr_select = 0;