author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-19 13:34:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-19 13:34:34 -0700
commit     2fe296a61a4e2ec11efc41cb8d645c52e0113f55 (patch)
tree       5632d414c32055b225dabb14044923b9f195f69c /arch
parent     e5a489abcfd216d07ad6b33ea0d191e61d0f25ea (diff)
parent     63a1e1c95e60e798fa09ab3c536fb555aa5bbf2b (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes/cleanups from Catalin Marinas:

 - Avoid taking a mutex in the secondary CPU bring-up path when
   interrupts are disabled

 - Ignore perf exclude_hv when the kernel is running in Hyp mode

 - Remove redundant instruction in cmpxchg

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/cpufeature: don't use mutex in bringup path
  arm64: perf: Ignore exclude_hv when kernel is running in HYP
  arm64: Remove redundant mov from LL/SC cmpxchg
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm64/include/asm/atomic_ll_sc.h    1
-rw-r--r--   arch/arm64/include/asm/cpufeature.h     12
-rw-r--r--   arch/arm64/include/asm/kvm_host.h        8
-rw-r--r--   arch/arm64/kernel/cpufeature.c          23
-rw-r--r--   arch/arm64/kernel/perf_event.c          23
5 files changed, 53 insertions, 14 deletions
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f819fdcff1ac..f5a2d09afb38 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
" st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
- " mov %" #w "[oldval], %" #w "[old]\n" \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(unsigned long *)ptr) \
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e7f84a7b4465..428ee1f2468c 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
bool this_cpu_has_cap(unsigned int cap);
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
}
/* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
{
if (num >= ARM64_NCAPS)
return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
return test_bit(num, cpu_hwcaps);
}
+static inline bool cpus_have_const_cap(int num)
+{
+ if (static_branch_likely(&arm64_const_caps_ready))
+ return __cpus_have_const_cap(num);
+ else
+ return cpus_have_cap(num);
+}
+
static inline void cpus_set_cap(unsigned int num)
{
if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
num, ARM64_NCAPS);
} else {
__set_bit(num, cpu_hwcaps);
- static_branch_enable(&cpu_hwcap_keys[num]);
}
}
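
The hunk above splits the old cpus_have_const_cap() into a raw __cpus_have_const_cap() plus a wrapper that falls back to the cpu_hwcaps bitmap until arm64_const_caps_ready is flipped, so early callers no longer depend on the per-capability static keys having been enabled. A compilable userspace analogue of that fallback pattern, with plain booleans standing in for the static keys (names and the capability number are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

#define NCAPS 64

static bool const_caps_ready;            /* plays arm64_const_caps_ready */
static bool cap_keys[NCAPS];             /* plays cpu_hwcap_keys[]       */
static unsigned long cap_bitmap;         /* plays the cpu_hwcaps bitmap  */

static bool have_cap(unsigned int num)   /* like cpus_have_cap() */
{
	return num < NCAPS && ((cap_bitmap >> num) & 1);
}

static bool have_const_cap(unsigned int num)   /* like cpus_have_const_cap() */
{
	if (const_caps_ready)
		return num < NCAPS && cap_keys[num];   /* __cpus_have_const_cap() */
	return have_cap(num);                          /* early-boot fallback     */
}

int main(void)
{
	unsigned int cap = 3;                 /* illustrative capability number   */

	cap_bitmap |= 1UL << cap;             /* cpus_set_cap()                   */
	printf("%d\n", have_const_cap(cap));  /* 1: answered from the bitmap      */

	cap_keys[cap] = true;                 /* static_branch_enable(&keys[cap]) */
	const_caps_ready = true;              /* mark_const_caps_ready()          */
	printf("%d\n", have_const_cap(cap));  /* 1: now via the "static key"      */
	return 0;
}
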
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5e19165c5fa8..1f252a95bc02 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long vector_ptr)
{
/*
- * Call initialization code, and switch to the full blown
- * HYP code.
+ * Call initialization code, and switch to the full blown HYP code.
+ * If the cpucaps haven't been finalized yet, something has gone very
+ * wrong, and hyp will crash and burn when it uses any
+ * cpus_have_const_cap() wrapper.
*/
+ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 94b8f7fc3310..817ce3365e20 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
*/
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
- for (; caps->matches; caps++)
- if (caps->enable && cpus_have_cap(caps->capability))
+ for (; caps->matches; caps++) {
+ unsigned int num = caps->capability;
+
+ if (!cpus_have_cap(num))
+ continue;
+
+ /* Ensure cpus_have_const_cap(num) works */
+ static_branch_enable(&cpu_hwcap_keys[num]);
+
+ if (caps->enable) {
/*
* Use stop_machine() as it schedules the work allowing
* us to modify PSTATE, instead of on_each_cpu() which
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
* we return.
*/
stop_machine(caps->enable, NULL, cpu_online_mask);
+ }
+ }
}
/*
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
enable_cpu_capabilities(arm64_features);
}
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+ static_branch_enable(&arm64_const_caps_ready);
+}
+
/*
* Check if the current CPU has a given feature capability.
* Should be called from non-preemptible context.
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
/* Set the CPU feature capabilies */
setup_feature_capabilities();
enable_errata_workarounds();
+ mark_const_caps_ready();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0())
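
On the cpufeature.c side, the per-capability static_branch_enable() now happens in enable_cpu_capabilities(), which runs in boot-time process context, and arm64_const_caps_ready is only flipped afterwards from setup_cpu_features(). The underlying constraint is that enabling a static key takes the jump-label mutex and may sleep, so it must never run on the secondary CPU bring-up path with interrupts disabled. A hedged sketch of the generic jump-label pattern involved (the key and the surrounding functions are illustrative, not part of this patch):

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(example_key);

static void example_hot_path(void)
{
	/* Compiles to a patchable branch: effectively a nop until enabled. */
	if (static_branch_likely(&example_key))
		pr_debug("fast path\n");
	else
		pr_debug("fallback path\n");
}

static void example_late_setup(void)
{
	/*
	 * Enabling a key takes the jump-label mutex and can sleep, which is
	 * why it belongs here, in boot-time process context, and not on the
	 * secondary CPU bring-up path where interrupts are disabled.
	 */
	static_branch_enable(&example_key);
	example_hot_path();
}
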
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index bcc79471b38e..83a1b1ad189f 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
if (attr->exclude_idle)
return -EPERM;
- if (is_kernel_in_hyp_mode() &&
- attr->exclude_kernel != attr->exclude_hv)
- return -EINVAL;
+
+ /*
+ * If we're running in hyp mode, then we *are* the hypervisor.
+ * Therefore we ignore exclude_hv in this configuration, since
+ * there's no hypervisor to sample anyway. This is consistent
+ * with other architectures (x86 and Power).
+ */
+ if (is_kernel_in_hyp_mode()) {
+ if (!attr->exclude_kernel)
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
+ } else {
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+ if (!attr->exclude_hv)
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
+ }
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
- if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
- config_base |= ARMV8_PMU_EXCLUDE_EL1;
- if (!attr->exclude_hv)
- config_base |= ARMV8_PMU_INCLUDE_EL2;
/*
* Install the filter into config_base as this is used to
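
With this change, a VHE kernel derives ARMV8_PMU_INCLUDE_EL2 purely from exclude_kernel and no longer rejects attributes where exclude_kernel differs from exclude_hv; from userspace, exclude_hv is simply ignored when the kernel itself runs at EL2, matching x86 and Power. A minimal perf_event_open() sketch showing the attribute bits in question (error handling trimmed; the event choice and target are illustrative):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_user = 0;      /* count EL0                                      */
	attr.exclude_kernel = 0;    /* count EL1, and EL2 too on a VHE kernel         */
	attr.exclude_hv = 1;        /* ignored when the kernel itself runs in HYP     */
	attr.exclude_idle = 0;      /* the driver still returns -EPERM if this is set */

	/*
	 * Before this fix, exclude_kernel != exclude_hv was rejected with
	 * -EINVAL on VHE systems; now the counter opens and EL2 filtering
	 * follows exclude_kernel.
	 */
	fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
		     -1 /* any CPU */, -1 /* no group */, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}
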