author		Quentin Perret <qperret@google.com>	2021-03-19 10:01:46 +0000
committer	Marc Zyngier <maz@kernel.org>	2021-03-19 12:02:19 +0000
commit		90134ac9cabb69972d0a509bf08e108a73442184 (patch)
tree		77d5e0f6a188232498265cc24eeea935c6442de2 /arch/arm64/kvm/hyp/nvhe/mem_protect.c
parent		9589a38cdfeba0889590e6ef4627b439034d456c (diff)
KVM: arm64: Protect the .hyp sections from the host
When KVM runs in nVHE protected mode, use the host stage 2 to unmap the
hypervisor sections by marking them as owned by the hypervisor itself.
The long-term goal is to ensure the EL2 code can remain robust
regardless of the host's state, so this starts by making sure the host
cannot e.g. write to the .hyp sections directly.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-39-qperret@google.com
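The EL2 side below only adds the __pkvm_mark_hyp handler; the host still
has to issue that hypercall once per .hyp section during init. As a rough
sketch of what such a caller could look like, assuming the existing
kvm_call_hyp_nvhe() and __pa_symbol() helpers (the pkvm_mark_hyp wrapper
and pkvm_mark_hyp_section macro names are illustrative, not taken from
this patch):

/*
 * Hypothetical host-side wrapper: resolve a .hyp section's linker
 * symbols to physical addresses and ask EL2 to take ownership of
 * the range via the __pkvm_mark_hyp hypercall.
 */
static int pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
{
	return kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end);
}

#define pkvm_mark_hyp_section(__section)		\
	pkvm_mark_hyp(__pa_symbol(__section##_start),	\
		      __pa_symbol(__section##_end))

Invoked for each .hyp section (text, rodata, bss, and so on), this is
what turns stray host writes to those sections into the stage-2 faults
that land in handle_host_mem_abort().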
Diffstat (limited to 'arch/arm64/kvm/hyp/nvhe/mem_protect.c')
 arch/arm64/kvm/hyp/nvhe/mem_protect.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 77b48c47344d..808e2471091b 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -27,6 +27,8 @@ struct host_kvm host_kvm;
 struct hyp_pool host_s2_mem;
 struct hyp_pool host_s2_dev;
 
+static const u8 pkvm_hyp_id = 1;
+
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
 	return hyp_alloc_pages(&host_s2_mem, get_order(size));
@@ -182,6 +184,18 @@ static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
 	return false;
 }
 
+static bool range_is_memory(u64 start, u64 end)
+{
+	struct kvm_mem_range r1, r2;
+
+	if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+		return false;
+	if (r1.start != r2.start)
+		return false;
+
+	return true;
+}
+
 static inline int __host_stage2_idmap(u64 start, u64 end,
 				      enum kvm_pgtable_prot prot,
 				      struct hyp_pool *pool)
@@ -229,6 +243,25 @@ unlock:
 	return ret;
 }
 
+int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
+{
+	int ret;
+
+	/*
+	 * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
+	 * non-persistent, so don't allow changing page ownership in MMIO range.
+	 */
+	if (!range_is_memory(start, end))
+		return -EINVAL;
+
+	hyp_spin_lock(&host_kvm.lock);
+	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
+					   &host_s2_mem, pkvm_hyp_id);
+	hyp_spin_unlock(&host_kvm.lock);
+
+	return ret != -EAGAIN ? ret : 0;
+}
+
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
 {
 	struct kvm_vcpu_fault_info fault;
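Two details of the new hypervisor code are easy to miss. First,
range_is_memory() leans on find_mem_range(), which resolves a physical
address to the memblock-backed region containing it; since those regions
are disjoint, both endpoints resolving to a region with the same start
means the whole interval sits in one contiguous RAM region and cannot
straddle an MMIO hole. Second, the final return folds -EAGAIN into
success: the stage-2 walker uses that code for a benign "nothing to
update" case rather than a hard failure, so the hypercall reports
success to the host. Below is a self-contained model of the range check,
with a hard-coded memory map standing in for the memblock-derived one
(all names and addresses here are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mem_range {
	uint64_t start;
	uint64_t end;
};

/* Illustrative memory map: two RAM banks separated by an MMIO hole. */
static const struct mem_range ram[] = {
	{ 0x40000000, 0x80000000 },
	{ 0x90000000, 0xa0000000 },
};

/* Stand-in for find_mem_range(): locate the RAM bank containing addr. */
static bool lookup(uint64_t addr, struct mem_range *range)
{
	for (size_t i = 0; i < sizeof(ram) / sizeof(ram[0]); i++) {
		if (addr >= ram[i].start && addr < ram[i].end) {
			*range = ram[i];
			return true;
		}
	}
	return false;
}

/* Same shape as the new range_is_memory(): both endpoints must land in
 * the *same* bank, so [start, end] cannot cross a hole between banks. */
static bool range_is_memory(uint64_t start, uint64_t end)
{
	struct mem_range r1, r2;

	if (!lookup(start, &r1) || !lookup(end, &r2))
		return false;
	return r1.start == r2.start;
}

int main(void)
{
	printf("%d\n", range_is_memory(0x40001000ULL, 0x7ff00000ULL)); /* 1 */
	printf("%d\n", range_is_memory(0x7ff00000ULL, 0x90100000ULL)); /* 0: crosses the hole */
	printf("%d\n", range_is_memory(0x80100000ULL, 0x80200000ULL)); /* 0: pure MMIO */
	return 0;
}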