author    Yicong Yang <yangyicong@hisilicon.com>    2024-04-11 20:30:30 +0800
committer Will Deacon <will@kernel.org>             2024-04-12 12:54:55 +0100
commit    b782e8d07baac95a5ce3f8773cc61f4ed7d0ccbc (patch)
tree      35bb25978e577fe9ed2b032f1f22b78fe3f2357f
parent    8f9f5041c64600b01b71f29fb8e2121e45bfb719 (diff)
arm64: arm_pmuv3: Correctly extract and check the PMUVer
Currently we're using "sbfx" to extract the PMUVer from ID_AA64DFR0_EL1 and skip the init/reset if no PMU is present, i.e. when the extracted PMUVer is negative or zero. However, for PMUv3p8 the PMUVer field is 0b1000, so the value extracted by "sbfx" is always negative and we unexpectedly skip the init/reset in __init_el2_debug/reset_pmuserenr_el0.

Use "ubfx" instead of "sbfx" to extract the PMUVer, and skip the reset/init only if the PMUVer is IMPLEMENTATION DEFINED (0b1111) or not implemented (0b0000). Previously we would also skip the init/reset if the PMUVer was higher than the highest version we know of (currently PMUv3p9); with this patch we only skip if the PMU is not implemented or IMPLEMENTATION DEFINED. This is consistent with how the driver probes the PMU with pmuv3_implemented().

Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Link: https://lore.kernel.org/r/20240411123030.7201-1-yangyicong@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
-rw-r--r--    arch/arm64/include/asm/assembler.h    7
-rw-r--r--    arch/arm64/include/asm/el2_setup.h    9
2 files changed, 9 insertions, 7 deletions
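
Before the diff, a minimal userspace C sketch (not kernel code; the macro values and helper names are illustrative, with the PMUVer field taken as bits [11:8] of ID_AA64DFR0_EL1) of why the signed extraction goes wrong for PMUv3p8 and what the new check does instead:

#include <stdint.h>
#include <stdio.h>

#define PMUVER_SHIFT	8	/* ID_AA64DFR0_EL1.PMUVer is bits [11:8] */
#define PMUVER_NI	0x0	/* PMU not implemented */
#define PMUVER_IMP_DEF	0xf	/* IMPLEMENTATION DEFINED PMU */

/* "sbfx"-style extraction: sign-extends the 4-bit field. */
static int pmuver_signed(uint64_t dfr0)
{
	int v = (dfr0 >> PMUVER_SHIFT) & 0xf;

	return (v & 0x8) ? v - 16 : v;	/* sign-extend 4 bits to int */
}

/* "ubfx"-style extraction: zero-extends the 4-bit field. */
static unsigned int pmuver_unsigned(uint64_t dfr0)
{
	return (dfr0 >> PMUVER_SHIFT) & 0xf;
}

int main(void)
{
	uint64_t dfr0 = (uint64_t)0x8 << PMUVER_SHIFT;	/* PMUv3p8 */
	unsigned int v = pmuver_unsigned(dfr0);

	/* Old check: "cmp #1; b.lt" skips whenever the signed value is
	 * below 1, so PMUv3p8 (-8 after sign extension) is wrongly skipped. */
	printf("old check skips: %d\n", pmuver_signed(dfr0) < 1);

	/* New check: skip only for NI (0b0000) or IMP_DEF (0b1111),
	 * mirroring the driver's pmuv3_implemented(). */
	printf("new check skips: %d\n", v == PMUVER_NI || v == PMUVER_IMP_DEF);
	return 0;
}

Compiled and run, this should print "old check skips: 1" and "new check skips: 0", which is exactly the failure mode the patch fixes.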
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ab8b396428da..9ecd076ba08f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -480,9 +480,10 @@ alternative_endif
*/
.macro reset_pmuserenr_el0, tmpreg
mrs \tmpreg, id_aa64dfr0_el1
- sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
- cmp \tmpreg, #1 // Skip if no PMU present
- b.lt 9000f
+ ubfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
+ cmp \tmpreg, #ID_AA64DFR0_EL1_PMUVer_NI
+ ccmp \tmpreg, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
+ b.eq 9000f // Skip if no PMU present or IMP_DEF
msr pmuserenr_el0, xzr // Disable PMU access from EL0
9000:
.endm
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index b7afaa026842..e4546b29dd0c 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -59,13 +59,14 @@
.macro __init_el2_debug
mrs x1, id_aa64dfr0_el1
- sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
- cmp x0, #1
- b.lt .Lskip_pmu_\@ // Skip if no PMU present
+ ubfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
+ cmp x0, #ID_AA64DFR0_EL1_PMUVer_NI
+ ccmp x0, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
+ b.eq .Lskip_pmu_\@ // Skip if no PMU present or IMP_DEF
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
.Lskip_pmu_\@:
- csel x2, xzr, x0, lt // all PMU counters from EL1
+ csel x2, xzr, x0, eq // all PMU counters from EL1
/* Statistical profiling */
ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
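
For reference, the cmp/ccmp pair above is the flag-setting form of "skip if PMUVer is NI or IMP_DEF": with an immediate NZCV of #4, ccmp forces the Z flag when the first comparison already matched ID_AA64DFR0_EL1_PMUVer_NI (so its "ne" condition fails), and otherwise compares against ID_AA64DFR0_EL1_PMUVer_IMP_DEF, so the following b.eq and csel see "eq" exactly when either value matched. The csel in __init_el2_debug can move from "lt" to "eq" because neither mrs nor ubfx writes the condition flags, so the flags set by the ccmp are still live there. A rough C equivalent of the new __init_el2_debug logic, as a sketch with illustrative names only (not kernel code):

#include <stdint.h>

/* Returns the value the macro leaves in x2: 0 if the PMU is absent or
 * IMPLEMENTATION DEFINED, otherwise PMCR_EL0.N (the counter count). */
static uint64_t el2_debug_pmu_counters(uint64_t id_aa64dfr0, uint64_t pmcr_el0)
{
	unsigned int pmuver = (id_aa64dfr0 >> 8) & 0xf;	/* ubfx: PMUVer, bits [11:8] */
	int skip = (pmuver == 0x0) || (pmuver == 0xf);	/* cmp + ccmp ... b.eq */
	uint64_t pmcr_n = (pmcr_el0 >> 11) & 0x1f;	/* ubfx: PMCR_EL0.N, bits [15:11] */

	return skip ? 0 : pmcr_n;			/* csel x2, xzr, x0, eq */
}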