author    Marc Zyngier <marc.zyngier@arm.com>    2017-10-23 17:11:21 +0100
committer Christoffer Dall <christoffer.dall@linaro.org>    2018-01-08 15:20:46 +0100
commit 7a3796d2ef5bb948f709467eef1bf96edbfc67a0 (patch)
tree   8b33f893ac17f71126f7960a7909d18be397353d /virt
parent a9c0e12ebee56ef06b7eccdbc73bab71d0018df8 (diff)
KVM: arm/arm64: Preserve Exec permission across R/W permission faults
So far, we lose the Exec property whenever we take permission faults, as
we always reconstruct the PTE/PMD from scratch. This can be
counterproductive, as we can end up with the following fault sequence:

        X -> RO -> ROX -> RW -> RWX

Instead, we can look up the existing PTE/PMD and clear the XN bit in the
new entry if it was already cleared in the old one, leading to a much
nicer fault sequence:

        X -> ROX -> RWX

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
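To make the intent concrete before reading the diff, here is a minimal,
self-contained sketch of the idea: on a permission fault, carry the exec
permission from the old entry into the rebuilt one. All names here are
toy stand-ins (S2_EXEC, toy_handle_perm_fault, ...), not the kernel code
in the patch below.

#include <stdbool.h>
#include <stdio.h>

/* Toy permission bits; S2_EXEC set means XN is cleared. */
#define S2_READ  (1u << 0)
#define S2_WRITE (1u << 1)
#define S2_EXEC  (1u << 2)

/* Toy stage-2 "table": one flat entry per IPA index. */
static unsigned int s2_table[16];

static bool toy_is_exec(unsigned int ipa)
{
        return s2_table[ipa] & S2_EXEC;
}

/*
 * On a permission fault the entry is rebuilt from scratch; preserve
 * exec if the old entry already had it, so the guest later takes
 * X -> ROX -> RWX rather than X -> RO -> ROX -> RW -> RWX.
 */
static void toy_handle_perm_fault(unsigned int ipa, unsigned int new_perms)
{
        if (toy_is_exec(ipa))
                new_perms |= S2_EXEC;
        s2_table[ipa] = new_perms;
}

int main(void)
{
        s2_table[0] = S2_READ | S2_EXEC;              /* faulted in as ROX */
        toy_handle_perm_fault(0, S2_READ | S2_WRITE); /* write perm fault  */
        printf("exec preserved: %d\n", toy_is_exec(0) ? 1 : 0); /* prints 1 */
        return 0;
}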
Diffstat (limited to 'virt')
-rw-r--r--    virt/kvm/arm/mmu.c    27
1 file changed, 27 insertions, 0 deletions
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index f956efbd933d..b83b5a8442bb 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -926,6 +926,25 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
         return 0;
 }
 
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+{
+        pmd_t *pmdp;
+        pte_t *ptep;
+
+        pmdp = stage2_get_pmd(kvm, NULL, addr);
+        if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
+                return false;
+
+        if (pmd_thp_or_huge(*pmdp))
+                return kvm_s2pmd_exec(pmdp);
+
+        ptep = pte_offset_kernel(pmdp, addr);
+        if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
+                return false;
+
+        return kvm_s2pte_exec(ptep);
+}
+
 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                           phys_addr_t addr, const pte_t *new_pte,
                           unsigned long flags)
@@ -1407,6 +1426,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 if (exec_fault) {
                         new_pmd = kvm_s2pmd_mkexec(new_pmd);
                         invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
+                } else if (fault_status == FSC_PERM) {
+                        /* Preserve execute if XN was already cleared */
+                        if (stage2_is_exec(kvm, fault_ipa))
+                                new_pmd = kvm_s2pmd_mkexec(new_pmd);
                 }
 
                 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
@@ -1425,6 +1448,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 if (exec_fault) {
                         new_pte = kvm_s2pte_mkexec(new_pte);
                         invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
+                } else if (fault_status == FSC_PERM) {
+                        /* Preserve execute if XN was already cleared */
+                        if (stage2_is_exec(kvm, fault_ipa))
+                                new_pte = kvm_s2pte_mkexec(new_pte);
                 }
 
                 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
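For reference, the two-level lookup pattern used by stage2_is_exec()
above (answer at the block/PMD level for huge mappings, otherwise
descend to the page/PTE level) can be modelled in isolation as follows.
The types and helpers here are hypothetical stand-ins, not the kernel's.

#include <stdbool.h>
#include <stddef.h>

struct toy_pte { bool present, exec; };

struct toy_pmd {
        bool present, huge, exec;   /* exec meaningful only when huge */
        struct toy_pte *ptes;       /* next-level table when !huge    */
};

static bool toy_is_exec(struct toy_pmd *pmd, size_t pte_idx)
{
        if (!pmd || !pmd->present)
                return false;       /* nothing mapped at this IPA */

        if (pmd->huge)              /* block mapping: answer here */
                return pmd->exec;

        struct toy_pte *pte = &pmd->ptes[pte_idx];
        if (!pte->present)
                return false;

        return pte->exec;           /* page mapping: answer at the leaf */
}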