author     Will Deacon <will@kernel.org>                    2020-07-23 11:17:14 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-08-05 09:59:51 +0200
commit     fd412846a6ecc0ed1876c04a6ed93c5205c9bdf9
tree       d484e257065232ad4b7238ab81bb3373656f1a86
parent     1aff51292ee881f79b226915ef15bdcb87a95fe2
KVM: arm64: Don't inherit exec permission across page-table levels
commit b757b47a2fcba584d4a32fd7ee68faca510ab96f upstream.

If a stage-2 page-table contains an executable, read-only mapping at the
pte level (e.g. due to dirty logging being enabled), a subsequent write
fault to the same page which tries to install a larger block mapping
(e.g. due to dirty logging having been disabled) will erroneously inherit
the exec permission and consequently skip I-cache invalidation for the
rest of the block.

Ensure that exec permission is only inherited by write faults when the
new mapping is of the same size as the existing one. A subsequent
instruction abort will result in I-cache invalidation for the entire
block mapping.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Quentin Perret <qperret@google.com>
Reviewed-by: Quentin Perret <qperret@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/20200723101714.15873-1-will@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
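To make the rule concrete, here is a minimal standalone C sketch of the
size-gated inheritance check the patch introduces. Everything in it
(may_inherit_exec(), enum leaf_level, the hard-coded 4KiB/2MiB/1GiB sizes)
is a simplified stand-in for the kernel's stage-2 walker, not the actual
KVM code in the diff below:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in mapping sizes (typical arm64 4K-page geometry); the real
 * values come from the kernel's page-table configuration. */
#define PAGE_SIZE (1UL << 12)
#define PMD_SIZE  (1UL << 21)
#define PUD_SIZE  (1UL << 30)

enum leaf_level { LEAF_PTE, LEAF_PMD, LEAF_PUD };

/*
 * Hypothetical reduction of the patched stage2_is_exec() logic: a new
 * mapping may inherit exec permission from an existing leaf entry only
 * if it covers no more address space than that leaf; otherwise part of
 * the new block would never see an I-cache invalidation.
 */
static bool may_inherit_exec(enum leaf_level level, bool leaf_exec,
			     unsigned long new_sz)
{
	if (!leaf_exec)
		return false;

	switch (level) {
	case LEAF_PUD:
		return new_sz <= PUD_SIZE;
	case LEAF_PMD:
		return new_sz <= PMD_SIZE;
	default: /* LEAF_PTE: a pte covers exactly one page */
		return new_sz == PAGE_SIZE;
	}
}

int main(void)
{
	/* The bug scenario: an exec'able pte exists, and a write fault
	 * wants to install a 2MiB block over it. Pre-patch this wrongly
	 * inherited exec; post-patch it must not. */
	printf("pte -> 2MiB block inherits exec: %s\n",
	       may_inherit_exec(LEAF_PTE, true, PMD_SIZE) ? "yes" : "no");
	printf("pmd -> 4KiB page inherits exec:  %s\n",
	       may_inherit_exec(LEAF_PMD, true, PAGE_SIZE) ? "yes" : "no");
	return 0;
}

With the guard in place, may_inherit_exec(LEAF_PTE, true, PMD_SIZE) returns
false: a write fault that widens an exec'able 4KiB pte into a 2MiB block can
no longer skip I-cache invalidation, which was exactly the pre-patch bug.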
Diffstat (limited to 'virt/kvm/arm/mmu.c')
-rw-r--r--  virt/kvm/arm/mmu.c | 11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ce7fa37987e1..767ac4eab4fe 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1199,7 +1199,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
 	return true;
 }
 
-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
 {
 	pud_t *pudp;
 	pmd_t *pmdp;
@@ -1211,11 +1211,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
 		return false;
 
 	if (pudp)
-		return kvm_s2pud_exec(pudp);
+		return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
 	else if (pmdp)
-		return kvm_s2pmd_exec(pmdp);
+		return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
 	else
-		return kvm_s2pte_exec(ptep);
+		return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
 }
 
 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1805,7 +1805,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * execute permissions, and we preserve whatever we have.
 	 */
 	needs_exec = exec_fault ||
-		(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+		(fault_status == FSC_PERM &&
+		 stage2_is_exec(kvm, fault_ipa, vma_pagesize));
 
 	if (vma_pagesize == PUD_SIZE) {
 		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
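One way to read the asymmetry in the new checks (an interpretation of the
diff; the commit message does not spell it out): a PUD- or PMD-level leaf
had its whole block I-cache-invalidated when it was made executable, so any
new mapping no larger than that block (sz <= PUD_SIZE, sz <= PMD_SIZE) is
already covered, whereas a pte-level leaf only covers a single page, so only
an equal-sized PAGE_SIZE mapping may safely inherit its exec permission.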