author		Linus Torvalds <torvalds@linux-foundation.org>	2024-11-19 12:21:35 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-11-19 12:21:35 -0800
commit		55db8eb4565f943dc0ebd1327cbe3d9d684f74e8 (patch)
tree		2c51b8d22b3ba379889c50137595b18f38acd93a /arch
parent		9db8b240704cf66b8c9caaad586034399ac39641 (diff)
parent		8bca85cc1eb72e21a3544ab32e546a819d8674ca (diff)
Merge tag 'x86_sev_for_v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 SEV updates from Borislav Petkov:

 - Do the proper memory conversion of guest memory in order to be able
   to kexec kernels in SNP guests along with other adjustments and
   cleanups to that effect

 - Start converting and moving functionality from the sev-guest driver
   into core code with the purpose of supporting the secure TSC SNP
   feature where the hypervisor cannot influence the TSC exposed to the
   guest anymore

 - Add a "nosnp" cmdline option in order to be able to disable SNP
   support in the hypervisor and thus free up resources which are not
   going to be used

 - Cleanups

[ Reminding myself about the endless TLAs again: SEV is the AMD Secure
  Encrypted Virtualization - Linus ]

* tag 'x86_sev_for_v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev: Cleanup vc_handle_msr()
  x86/sev: Convert shared memory back to private on kexec
  x86/mm: Refactor __set_clr_pte_enc()
  x86/boot: Skip video memory access in the decompressor for SEV-ES/SNP
  virt: sev-guest: Carve out SNP message context structure
  virt: sev-guest: Reduce the scope of SNP command mutex
  virt: sev-guest: Consolidate SNP guest messaging parameters to a struct
  x86/sev: Cache the secrets page address
  x86/sev: Handle failures from snp_init()
  virt: sev-guest: Use AES GCM crypto library
  x86/virt: Provide "nosnp" boot option for sev kernel command line
  x86/virt: Move SEV-specific parsing into arch/x86/virt/svm
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/boot/compressed/misc.c		15
-rw-r--r--	arch/x86/coco/sev/core.c		269
-rw-r--r--	arch/x86/include/asm/sev-common.h	27
-rw-r--r--	arch/x86/include/asm/sev.h		67
-rw-r--r--	arch/x86/mm/mem_encrypt_amd.c		77
-rw-r--r--	arch/x86/mm/mem_encrypt_identity.c	11
-rw-r--r--	arch/x86/virt/svm/Makefile		1
-rw-r--r--	arch/x86/virt/svm/cmdline.c		45
8 files changed, 377 insertions, 135 deletions
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 04a35b2c26e9..0d37420cad02 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -385,6 +385,19 @@ static void parse_mem_encrypt(struct setup_header *hdr)
hdr->xloadflags |= XLF_MEM_ENCRYPTION;
}
+static void early_sev_detect(void)
+{
+ /*
+ * Accessing video memory causes guest termination because
+ * the boot stage2 #VC handler of SEV-ES/SNP guests does not
+ * support MMIO handling and kexec -c adds screen_info to the
+ * boot parameters passed to the kexec kernel, which causes
+ * console output to be dumped to both video and serial.
+ */
+ if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
+ lines = cols = 0;
+}
+
/*
* The compressed kernel image (ZO) has been moved so that its position
* is against the end of the buffer used to hold the uncompressed kernel
@@ -440,6 +453,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
*/
early_tdx_detect();
+ early_sev_detect();
+
console_init();
/*
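
early_sev_detect() keys off sev_status, which the early boot code fills from the SEV status MSR well before extract_kernel() runs. A minimal sketch of the bits involved, per the architectural definitions in <asm/msr-index.h> (restated here for reference, not part of this diff):

#define MSR_AMD64_SEV			0xc0010131
#define MSR_AMD64_SEV_ENABLED		BIT_ULL(0)	/* SEV */
#define MSR_AMD64_SEV_ES_ENABLED	BIT_ULL(1)	/* SEV-ES */
#define MSR_AMD64_SEV_SNP_ENABLED	BIT_ULL(2)	/* SEV-SNP */

Since SNP implies SEV-ES, testing MSR_AMD64_SEV_ES_ENABLED covers both guest types; zeroing lines and cols makes the decompressor's console code skip the VGA path entirely, so output goes only to serial.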
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index de1df0cb45da..c5b0148b8c0a 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -92,6 +92,9 @@ static struct ghcb *boot_ghcb __section(".data");
/* Bitmap of SEV features supported by the hypervisor */
static u64 sev_hv_features __ro_after_init;
+/* Secrets page physical address from the CC blob */
+static u64 secrets_pa __ro_after_init;
+
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
struct ghcb ghcb_page;
@@ -141,33 +144,6 @@ static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
static DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
static DEFINE_PER_CPU(u64, svsm_caa_pa);
-struct sev_config {
- __u64 debug : 1,
-
- /*
- * Indicates when the per-CPU GHCB has been created and registered
- * and thus can be used by the BSP instead of the early boot GHCB.
- *
- * For APs, the per-CPU GHCB is created before they are started
- * and registered upon startup, so this flag can be used globally
- * for the BSP and APs.
- */
- ghcbs_initialized : 1,
-
- /*
- * Indicates when the per-CPU SVSM CA is to be used instead of the
- * boot SVSM CA.
- *
- * For APs, the per-CPU SVSM CA is created as part of the AP
- * bringup, so this flag can be used globally for the BSP and APs.
- */
- use_cas : 1,
-
- __reserved : 61;
-};
-
-static struct sev_config sev_cfg __read_mostly;
-
static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
unsigned long sp = regs->sp;
@@ -722,45 +698,13 @@ void noinstr __sev_es_nmi_complete(void)
__sev_put_ghcb(&state);
}
-static u64 __init get_secrets_page(void)
-{
- u64 pa_data = boot_params.cc_blob_address;
- struct cc_blob_sev_info info;
- void *map;
-
- /*
- * The CC blob contains the address of the secrets page, check if the
- * blob is present.
- */
- if (!pa_data)
- return 0;
-
- map = early_memremap(pa_data, sizeof(info));
- if (!map) {
- pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
- return 0;
- }
- memcpy(&info, map, sizeof(info));
- early_memunmap(map, sizeof(info));
-
- /* smoke-test the secrets page passed */
- if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
- return 0;
-
- return info.secrets_phys;
-}
-
static u64 __init get_snp_jump_table_addr(void)
{
struct snp_secrets_page *secrets;
void __iomem *mem;
- u64 pa, addr;
+ u64 addr;
- pa = get_secrets_page();
- if (!pa)
- return 0;
-
- mem = ioremap_encrypted(pa, PAGE_SIZE);
+ mem = ioremap_encrypted(secrets_pa, PAGE_SIZE);
if (!mem) {
pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
return 0;
@@ -1010,6 +954,137 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
}
+static void set_pte_enc(pte_t *kpte, int level, void *va)
+{
+ struct pte_enc_desc d = {
+ .kpte = kpte,
+ .pte_level = level,
+ .va = va,
+ .encrypt = true
+ };
+
+ prepare_pte_enc(&d);
+ set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
+}
+
+static void unshare_all_memory(void)
+{
+ unsigned long addr, end, size, ghcb;
+ struct sev_es_runtime_data *data;
+ unsigned int npages, level;
+ bool skipped_addr;
+ pte_t *pte;
+ int cpu;
+
+ /* Unshare the direct mapping. */
+ addr = PAGE_OFFSET;
+ end = PAGE_OFFSET + get_max_mapped();
+
+ while (addr < end) {
+ pte = lookup_address(addr, &level);
+ size = page_level_size(level);
+ npages = size / PAGE_SIZE;
+ skipped_addr = false;
+
+ if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
+ addr += size;
+ continue;
+ }
+
+ /*
+ * Ensure that all the per-CPU GHCBs are made private at the
+ * end of the unsharing loop so that the switch to the slower
+ * MSR protocol happens last.
+ */
+ for_each_possible_cpu(cpu) {
+ data = per_cpu(runtime_data, cpu);
+ ghcb = (unsigned long)&data->ghcb_page;
+
+ if (addr <= ghcb && ghcb <= addr + size) {
+ skipped_addr = true;
+ break;
+ }
+ }
+
+ if (!skipped_addr) {
+ set_pte_enc(pte, level, (void *)addr);
+ snp_set_memory_private(addr, npages);
+ }
+ addr += size;
+ }
+
+ /* Unshare all bss decrypted memory. */
+ addr = (unsigned long)__start_bss_decrypted;
+ end = (unsigned long)__start_bss_decrypted_unused;
+ npages = (end - addr) >> PAGE_SHIFT;
+
+ for (; addr < end; addr += PAGE_SIZE) {
+ pte = lookup_address(addr, &level);
+ if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
+ continue;
+
+ set_pte_enc(pte, level, (void *)addr);
+ }
+ addr = (unsigned long)__start_bss_decrypted;
+ snp_set_memory_private(addr, npages);
+
+ __flush_tlb_all();
+}
+
+/* Stop new private<->shared conversions */
+void snp_kexec_begin(void)
+{
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+
+ if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+ return;
+
+ /*
+ * The crash kernel ends up here with interrupts disabled: it can't
+ * wait for conversions to finish.
+ *
+ * If a race happened, just report and proceed.
+ */
+ if (!set_memory_enc_stop_conversion())
+ pr_warn("Failed to stop shared<->private conversions\n");
+}
+
+void snp_kexec_finish(void)
+{
+ struct sev_es_runtime_data *data;
+ unsigned int level, cpu;
+ unsigned long size;
+ struct ghcb *ghcb;
+ pte_t *pte;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+
+ if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+ return;
+
+ unshare_all_memory();
+
+ /*
+ * Switch to using the MSR protocol to change per-CPU GHCBs to
+ * private. All the per-CPU GHCBs have been switched back to private,
+ * so no further GHCB calls can be made to the hypervisor beyond this
+ * point until the kexec'ed kernel starts running.
+ */
+ boot_ghcb = NULL;
+ sev_cfg.ghcbs_initialized = false;
+
+ for_each_possible_cpu(cpu) {
+ data = per_cpu(runtime_data, cpu);
+ ghcb = &data->ghcb_page;
+ pte = lookup_address((unsigned long)ghcb, &level);
+ size = page_level_size(level);
+ set_pte_enc(pte, level, (void *)ghcb);
+ snp_set_memory_private((unsigned long)ghcb, (size / PAGE_SIZE));
+ }
+}
+
static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
{
int ret;
@@ -1331,35 +1406,39 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
return 0;
}
+/* Writes to the SVSM CAA MSR are ignored */
+static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
+{
+ if (write)
+ return ES_OK;
+
+ regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
+ regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
+
+ return ES_OK;
+}
+
static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
struct pt_regs *regs = ctxt->regs;
enum es_result ret;
- u64 exit_info_1;
+ bool write;
/* Is it a WRMSR? */
- exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
+ write = ctxt->insn.opcode.bytes[1] == 0x30;
- if (regs->cx == MSR_SVSM_CAA) {
- /* Writes to the SVSM CAA msr are ignored */
- if (exit_info_1)
- return ES_OK;
-
- regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
- regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
-
- return ES_OK;
- }
+ if (regs->cx == MSR_SVSM_CAA)
+ return __vc_handle_msr_caa(regs, write);
ghcb_set_rcx(ghcb, regs->cx);
- if (exit_info_1) {
+ if (write) {
ghcb_set_rax(ghcb, regs->ax);
ghcb_set_rdx(ghcb, regs->dx);
}
- ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0);
- if ((ret == ES_OK) && (!exit_info_1)) {
+ if ((ret == ES_OK) && !write) {
regs->ax = ghcb->save.rax;
regs->dx = ghcb->save.rdx;
}
@@ -2300,6 +2379,11 @@ bool __head snp_init(struct boot_params *bp)
if (!cc_info)
return false;
+ if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE)
+ secrets_pa = cc_info->secrets_phys;
+ else
+ return false;
+
setup_cpuid_table(cc_info);
svsm_setup(cc_info);
@@ -2374,23 +2458,6 @@ static int __init report_snp_info(void)
}
arch_initcall(report_snp_info);
-static int __init init_sev_config(char *str)
-{
- char *s;
-
- while ((s = strsep(&str, ","))) {
- if (!strcmp(s, "debug")) {
- sev_cfg.debug = true;
- continue;
- }
-
- pr_info("SEV command-line option '%s' was not recognized\n", s);
- }
-
- return 1;
-}
-__setup("sev=", init_sev_config);
-
static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
{
/* If (new) lengths have been returned, propagate them up */
@@ -2441,7 +2508,8 @@ int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
}
EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
-int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
+ struct snp_guest_request_ioctl *rio)
{
struct ghcb_state state;
struct es_em_ctxt ctxt;
@@ -2465,12 +2533,12 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct sn
vc_ghcb_invalidate(ghcb);
- if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+ if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
ghcb_set_rax(ghcb, input->data_gpa);
ghcb_set_rbx(ghcb, input->data_npages);
}
- ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
+ ret = sev_es_ghcb_hv_call(ghcb, &ctxt, req->exit_code, input->req_gpa, input->resp_gpa);
if (ret)
goto e_put;
@@ -2485,7 +2553,7 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct sn
case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
/* The number of expected pages is returned in RBX */
- if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+ if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
input->data_npages = ghcb_get_rbx(ghcb);
ret = -ENOSPC;
break;
@@ -2513,16 +2581,11 @@ static struct platform_device sev_guest_device = {
static int __init snp_init_platform_device(void)
{
struct sev_guest_platform_data data;
- u64 gpa;
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return -ENODEV;
- gpa = get_secrets_page();
- if (!gpa)
- return -ENODEV;
-
- data.secrets_gpa = gpa;
+ data.secrets_gpa = secrets_pa;
if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
return -ENODEV;
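
With this conversion, callers describe a guest request through the new struct snp_guest_req (added to asm/sev.h below) instead of passing a bare exit code. A hedged caller sketch; the buffer and context names (report_req, report_resp, mdesc, rio) are illustrative, not from this diff:

	struct snp_guest_req req = {
		.msg_version	= MSG_HDR_VER,
		.msg_type	= SNP_MSG_REPORT_REQ,	/* attestation report request */
		.vmpck_id	= 0,			/* default VM platform comm. key */
		.req_buf	= &report_req,
		.req_sz		= sizeof(report_req),
		.resp_buf	= &report_resp,
		.resp_sz	= sizeof(report_resp),
		.exit_code	= SVM_VMGEXIT_GUEST_REQUEST,
	};

	ret = snp_issue_guest_request(&req, &mdesc->input, &rio);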
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 98726c2b04f8..50f5666938c0 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -220,4 +220,31 @@ struct snp_psc_desc {
#define GHCB_ERR_INVALID_INPUT 5
#define GHCB_ERR_INVALID_EVENT 6
+struct sev_config {
+ __u64 debug : 1,
+
+ /*
+ * Indicates when the per-CPU GHCB has been created and registered
+ * and thus can be used by the BSP instead of the early boot GHCB.
+ *
+ * For APs, the per-CPU GHCB is created before they are started
+ * and registered upon startup, so this flag can be used globally
+ * for the BSP and APs.
+ */
+ ghcbs_initialized : 1,
+
+ /*
+ * Indicates when the per-CPU SVSM CA is to be used instead of the
+ * boot SVSM CA.
+ *
+ * For APs, the per-CPU SVSM CA is created as part of the AP
+ * bringup, so this flag can be used globally for the BSP and APs.
+ */
+ use_cas : 1,
+
+ __reserved : 61;
+};
+
+extern struct sev_config sev_cfg;
+
#endif
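
Moving sev_config into this shared header lets the new arch/x86/virt/svm/cmdline.c (further below) own the sev_cfg instance while sev/core.c keeps consuming the flags. A simplified sketch of the typical consumer pattern, modeled on the GHCB accessor logic rather than quoted from it:

	/* Prefer the registered per-CPU GHCB; fall back to the early boot GHCB. */
	if (sev_cfg.ghcbs_initialized)
		ghcb = &data->ghcb_page;
	else
		ghcb = boot_ghcb;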
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index ee34ab00a8d6..91f08af31078 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -120,6 +120,9 @@ struct snp_req_data {
};
#define MAX_AUTHTAG_LEN 32
+#define AUTHTAG_LEN 16
+#define AAD_LEN 48
+#define MSG_HDR_VER 1
/* See SNP spec SNP_GUEST_REQUEST section for the structure */
enum msg_type {
@@ -171,6 +174,19 @@ struct sev_guest_platform_data {
u64 secrets_gpa;
};
+struct snp_guest_req {
+ void *req_buf;
+ size_t req_sz;
+
+ void *resp_buf;
+ size_t resp_sz;
+
+ u64 exit_code;
+ unsigned int vmpck_id;
+ u8 msg_version;
+ u8 msg_type;
+};
+
/*
* The secrets page contains 96 bytes of reserved fields that can be used by
* the guest OS. The guest OS uses the area to save the message sequence
@@ -218,6 +234,27 @@ struct snp_secrets_page {
u8 rsvd4[3744];
} __packed;
+struct snp_msg_desc {
+ /* request and response are in unencrypted memory */
+ struct snp_guest_msg *request, *response;
+
+ /*
+ * Avoid information leakage by double-buffering shared messages
+ * in fields that are in regular encrypted memory.
+ */
+ struct snp_guest_msg secret_request, secret_response;
+
+ struct snp_secrets_page *secrets;
+ struct snp_req_data input;
+
+ void *certs_data;
+
+ struct aesgcm_ctx *ctx;
+
+ u32 *os_area_msg_seqno;
+ u8 *vmpck;
+};
+
/*
* The SVSM Calling Area (CA) related structures.
*/
@@ -285,6 +322,22 @@ struct svsm_attest_call {
u8 rsvd[4];
};
+/* PTE descriptor used for the prepare_pte_enc() operations. */
+struct pte_enc_desc {
+ pte_t *kpte;
+ int pte_level;
+ bool encrypt;
+ /* pfn of the kpte above */
+ unsigned long pfn;
+ /* physical address of @pfn */
+ unsigned long pa;
+ /* virtual address of @pfn */
+ void *va;
+ /* memory covered by the pte */
+ unsigned long size;
+ pgprot_t new_pgprot;
+};
+
/*
* SVSM protocol structure
*/
@@ -392,13 +445,18 @@ void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __noreturn snp_abort(void);
void snp_dmi_setup(void);
-int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
+ struct snp_guest_request_ioctl *rio);
int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
void sev_show_status(void);
void snp_update_svsm_ca(void);
+int prepare_pte_enc(struct pte_enc_desc *d);
+void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
+void snp_kexec_finish(void);
+void snp_kexec_begin(void);
#else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -422,7 +480,8 @@ static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
static inline void snp_dmi_setup(void) { }
-static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+static inline int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
+ struct snp_guest_request_ioctl *rio)
{
return -ENOTTY;
}
@@ -435,6 +494,10 @@ static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
static inline void sev_show_status(void) { }
static inline void snp_update_svsm_ca(void) { }
+static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; }
+static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
+static inline void snp_kexec_finish(void) { }
+static inline void snp_kexec_begin(void) { }
#endif /* CONFIG_AMD_MEM_ENCRYPT */
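
AUTHTAG_LEN, AAD_LEN and the aesgcm_ctx pointer in snp_msg_desc back the "Use AES GCM crypto library" change: sev-guest moves from the full crypto API to the lightweight AES-GCM library declared in <crypto/gcm.h>. A hedged sketch of the key-expansion step this enables; the function name is illustrative:

#include <crypto/gcm.h>
#include <linux/slab.h>

static struct aesgcm_ctx *snp_init_crypto_sketch(u8 *vmpck, int keylen)
{
	struct aesgcm_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);

	if (!ctx)
		return NULL;

	/* Expand the VMPCK once; the 16-byte authtag matches AUTHTAG_LEN. */
	if (aesgcm_expandkey(ctx, vmpck, keylen, AUTHTAG_LEN)) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}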
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 86a476a426c2..774f9677458f 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -311,59 +311,82 @@ static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool en
return 0;
}
-static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+int prepare_pte_enc(struct pte_enc_desc *d)
{
- pgprot_t old_prot, new_prot;
- unsigned long pfn, pa, size;
- pte_t new_pte;
+ pgprot_t old_prot;
- pfn = pg_level_to_pfn(level, kpte, &old_prot);
- if (!pfn)
- return;
+ d->pfn = pg_level_to_pfn(d->pte_level, d->kpte, &old_prot);
+ if (!d->pfn)
+ return 1;
- new_prot = old_prot;
- if (enc)
- pgprot_val(new_prot) |= _PAGE_ENC;
+ d->new_pgprot = old_prot;
+ if (d->encrypt)
+ pgprot_val(d->new_pgprot) |= _PAGE_ENC;
else
- pgprot_val(new_prot) &= ~_PAGE_ENC;
+ pgprot_val(d->new_pgprot) &= ~_PAGE_ENC;
/* If the new prot is the same as the old, do nothing. */
- if (pgprot_val(old_prot) == pgprot_val(new_prot))
- return;
+ if (pgprot_val(old_prot) == pgprot_val(d->new_pgprot))
+ return 1;
- pa = pfn << PAGE_SHIFT;
- size = page_level_size(level);
+ d->pa = d->pfn << PAGE_SHIFT;
+ d->size = page_level_size(d->pte_level);
/*
- * We are going to perform in-place en-/decryption and change the
- * physical page attribute from C=1 to C=0 or vice versa. Flush the
- * caches to ensure that data gets accessed with the correct C-bit.
+ * In-place en-/decryption and physical page attribute change
+ * from C=1 to C=0 or vice versa will be performed. Flush the
+ * caches to ensure that data gets accessed with the correct
+ * C-bit.
*/
- clflush_cache_range(__va(pa), size);
+ if (d->va)
+ clflush_cache_range(d->va, d->size);
+ else
+ clflush_cache_range(__va(d->pa), d->size);
+
+ return 0;
+}
+
+void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot)
+{
+ pte_t new_pte;
+
+ /* Change the page encryption mask. */
+ new_pte = pfn_pte(pfn, new_prot);
+ set_pte_atomic(kpte, new_pte);
+}
+
+static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+{
+ struct pte_enc_desc d = {
+ .kpte = kpte,
+ .pte_level = level,
+ .encrypt = enc
+ };
+
+ if (prepare_pte_enc(&d))
+ return;
/* Encrypt/decrypt the contents in-place */
if (enc) {
- sme_early_encrypt(pa, size);
+ sme_early_encrypt(d.pa, d.size);
} else {
- sme_early_decrypt(pa, size);
+ sme_early_decrypt(d.pa, d.size);
/*
* On SNP, the page state change in the RMP table must happen
* before the page table updates.
*/
- early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
+ early_snp_set_memory_shared((unsigned long)__va(d.pa), d.pa, 1);
}
- /* Change the page encryption mask. */
- new_pte = pfn_pte(pfn, new_prot);
- set_pte_atomic(kpte, new_pte);
+ set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
/*
* If page is set encrypted in the page table, then update the RMP table to
* add this page as private.
*/
if (enc)
- early_snp_set_memory_private((unsigned long)__va(pa), pa, 1);
+ early_snp_set_memory_private((unsigned long)__va(d.pa), d.pa, 1);
}
static int __init early_set_memory_enc_dec(unsigned long vaddr,
@@ -467,6 +490,8 @@ void __init sme_early_init(void)
x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
+ x86_platform.guest.enc_kexec_begin = snp_kexec_begin;
+ x86_platform.guest.enc_kexec_finish = snp_kexec_finish;
/*
* AMD-SEV-ES intercepts the RDMSR to read the X2APIC ID in the
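
The enc_kexec_begin/enc_kexec_finish members slot into the same x86_platform.guest hook set as the status-change and flush callbacks above, so the generic kexec path reaches the SNP code without SEV-specific ifdefs. A simplified illustration of the call order, not the actual call sites:

	/* On kexec: stop new conversions first, then re-encrypt shared memory. */
	x86_platform.guest.enc_kexec_begin();	/* -> snp_kexec_begin()  */
	x86_platform.guest.enc_kexec_finish();	/* -> snp_kexec_finish() */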
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index ac33b2263a43..e6c7686f443a 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -495,10 +495,10 @@ void __head sme_enable(struct boot_params *bp)
unsigned int eax, ebx, ecx, edx;
unsigned long feature_mask;
unsigned long me_mask;
- bool snp;
+ bool snp_en;
u64 msr;
- snp = snp_init(bp);
+ snp_en = snp_init(bp);
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
@@ -531,8 +531,11 @@ void __head sme_enable(struct boot_params *bp)
RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
- /* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
- if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
+ /*
+ * Any discrepancies between the presence of a CC blob and SNP
+ * enablement abort the guest.
+ */
+ if (snp_en ^ !!(msr & MSR_AMD64_SEV_SNP_ENABLED))
snp_abort();
/* Check if memory encryption is enabled */
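
The XOR folds both mismatch directions into a single check. Spelled out as a truth table (a restatement of the comment above, not new behavior):

/*  snp_en | SNP_ENABLED bit | result
 *  -------+-----------------+-------------------------------------
 *     0   |        0        | continue: no CC blob, no SNP
 *     0   |        1        | snp_abort(): SNP on, CC blob missing
 *     1   |        0        | snp_abort(): CC blob present, SNP off
 *     1   |        1        | continue: consistent SNP guest
 */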
diff --git a/arch/x86/virt/svm/Makefile b/arch/x86/virt/svm/Makefile
index ef2a31bdcc70..eca6d71355fa 100644
--- a/arch/x86/virt/svm/Makefile
+++ b/arch/x86/virt/svm/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_KVM_AMD_SEV) += sev.o
+obj-$(CONFIG_CPU_SUP_AMD) += cmdline.o
diff --git a/arch/x86/virt/svm/cmdline.c b/arch/x86/virt/svm/cmdline.c
new file mode 100644
index 000000000000..affa2759fa20
--- /dev/null
+++ b/arch/x86/virt/svm/cmdline.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD SVM-SEV command line parsing support
+ *
+ * Copyright (C) 2023 - 2024 Advanced Micro Devices, Inc.
+ *
+ * Author: Michael Roth <michael.roth@amd.com>
+ */
+
+#include <linux/string.h>
+#include <linux/printk.h>
+#include <linux/cache.h>
+#include <linux/cpufeature.h>
+
+#include <asm/sev-common.h>
+
+struct sev_config sev_cfg __read_mostly;
+
+static int __init init_sev_config(char *str)
+{
+ char *s;
+
+ while ((s = strsep(&str, ","))) {
+ if (!strcmp(s, "debug")) {
+ sev_cfg.debug = true;
+ continue;
+ }
+
+ if (!strcmp(s, "nosnp")) {
+ if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) {
+ setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+ continue;
+ } else {
+ goto warn;
+ }
+ }
+
+warn:
+ pr_info("SEV command-line option '%s' was not recognized\n", s);
+ }
+
+ return 1;
+}
+__setup("sev=", init_sev_config);