// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/mem_encrypt.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>
#include <asm/sev.h>
#include <asm/ia32.h>

#include "mm_internal.h"

/*
 * Since SME-related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * SNP-specific routine which additionally needs to change the page state
 * from private to shared before copying the data from the source to the
 * destination, and restore it after the copy.
 */
static inline void __init snp_memcpy(void *dst, void *src, size_t sz,
				     unsigned long paddr, bool decrypt)
{
	unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (decrypt) {
		/*
		 * @paddr needs to be accessed decrypted, mark the page shared in
		 * the RMP table before copying it.
		 */
		early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);

		memcpy(dst, src, sz);

		/* Restore the page state after the memcpy. */
		early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
	} else {
		/*
		 * @paddr needs to be accessed encrypted, so no page state
		 * change is needed.
		 */
		memcpy(dst, src, sz);
	}
}

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
			snp_memcpy(sme_early_buffer, src, len, paddr, enc);
			snp_memcpy(dst, sme_early_buffer, len, paddr, !enc);
		} else {
			memcpy(sme_early_buffer, src, len);
			memcpy(dst, sme_early_buffer, len);
		}

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

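/*
 * Map or unmap the given virtual address range in the early boot page
 * tables: when mapping, install PMD entries without the encryption mask
 * so the range is accessed decrypted; when unmapping, clear the entries.
 * The range is processed one PMD-sized (2M) chunk at a time.
 */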
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

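/*
 * Remove the decrypted mappings of the boot_params structure and the
 * kernel command line that were created by sme_map_bootdata().
 */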
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

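/*
 * With SME active, the boot data was placed in memory unencrypted by the
 * boot loader, so map the boot_params structure and the kernel command
 * line without the encryption mask in order to read them correctly.
 */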
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

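/*
 * Extract the PFN and, optionally, the protection bits from a page table
 * entry at the given page table level (4K, 2M or 1G); returns 0 for an
 * unknown level.
 */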
static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
	unsigned long pfn = 0;
	pgprot_t prot;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		WARN_ONCE(1, "Invalid level for kpte\n");
		return 0;
	}

	if (ret_prot)
		*ret_prot = prot;

	return pfn;
}

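/*
 * A TLB flush is always required after changing the encryption attribute
 * of a mapping: the old PTE, with the opposite C-bit, may still be cached
 * in the TLB.
 */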
static bool amd_enc_tlb_flush_required(bool enc)
{
	return true;
}

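/*
 * A cache flush around the C-bit change is only needed when the CPU does
 * not keep caches coherent between encrypted and decrypted mappings of
 * the same page (i.e. it lacks X86_FEATURE_SME_COHERENT).
 */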
static bool amd_enc_cache_flush_required(void)
{
	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
}

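/*
 * Notify the hypervisor of the encryption status of a range of guest
 * memory, walking the range according to the page size of each mapping.
 * This is a paravirt notification and compiles to a no-op without
 * CONFIG_PARAVIRT.
 */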
static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
{
#ifdef CONFIG_PARAVIRT
	unsigned long vaddr_end = vaddr + size;

	while (vaddr < vaddr_end) {
		int psize, pmask, level;
		unsigned long pfn;
		pte_t *kpte;

		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			WARN_ONCE(1, "kpte lookup failed for vaddr\n");
			return;
		}

		pfn = pg_level_to_pfn(level, kpte, NULL);
		if (!pfn)
			return;	/* bail out rather than spin on the same vaddr */

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);

		vaddr = (vaddr & pmask) + psize;
	}
#endif
}

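/*
 * x86_platform.guest.enc_status_change_prepare() hook, invoked before the
 * page table attributes are changed: for SEV-SNP the page state in the
 * RMP table must be made shared before the C-bit is cleared.
 */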
static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * To maintain the security guarantees of SEV-SNP guests, make sure
	 * to invalidate the memory before the encryption attribute is cleared.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
		snp_set_memory_shared(vaddr, npages);

	return true;
}

/* Return true unconditionally: return value doesn't matter for the SEV side */
static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * After memory is mapped encrypted in the page table, validate it
	 * so that it is consistent with the page table updates.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
		snp_set_memory_private(vaddr, npages);

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);

	return true;
}

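/*
 * Change the encryption attribute of a single page table entry,
 * encrypting or decrypting the page contents in place beforehand so the
 * data remains intact across the C-bit change.
 */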
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	pfn = pg_level_to_pfn(level, kpte, &old_prot);
	if (!pfn)
		return;

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If the protection flags are unchanged, there is nothing to do. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc) {
		sme_early_encrypt(pa, size);
	} else {
		sme_early_decrypt(pa, size);

		/*
		 * On SNP, the page state change in the RMP table must happen
		 * before the page table updates.
		 */
		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
	}

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);

	/*
	 * If the page is set encrypted in the page table, then update the RMP
	 * table to mark the page as private.
	 */
	if (enc)
		early_snp_set_memory_private((unsigned long)__va(pa), pa, 1);
}

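/*
 * Early boot-time equivalent of set_memory_encrypted()/decrypted(): walk
 * the page tables for the given range, flip the C-bit of each mapping and
 * split large pages when the range does not cover them entirely.
 */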
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next, start;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	start = vaddr;
	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned or the
		 * number of pages to set/clear the encryption bit on is
		 * smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, so create the
		 * next level page table mapping (4K or 2M). If it is part of a
		 * 2M page then request a split of the large page into 4K
		 * chunks; a 1GB large page is split into 2M pages accordingly.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

	early_set_mem_enc_dec_hypercall(start, size, enc);
out:
	__flush_tlb_all();
	return ret;
}

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
{
	enc_dec_hypercall(vaddr, size, enc);
}

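/*
 * Called early in boot when SME or SEV is active: propagate the
 * encryption mask into the early page table flags and the supported PTE
 * mask, and install the AMD-specific encryption status change hooks.
 */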
void __init sme_early_init(void)
{
	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	add_encrypt_protection_map();

	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = amd_enc_status_change_finish;
	x86_platform.guest.enc_tlb_flush_required    = amd_enc_tlb_flush_required;
	x86_platform.guest.enc_cache_flush_required  = amd_enc_cache_flush_required;

	/*
	 * AMD SEV-ES intercepts the RDMSR that reads the X2APIC ID in the
	 * parallel bringup low-level code. That raises a #VC exception which
	 * cannot be handled there.
	 * It does not provide a RDMSR GHCB protocol, so the early startup
	 * code cannot directly communicate with the secure firmware. The
	 * alternative of retrieving the APIC ID via CPUID(0xb), which is
	 * covered by the GHCB protocol, is not viable either because there
	 * is no enforcement that the "initial" APIC ID provided by
	 * CPUID(0xb) is the same as the real APIC ID.
	 * Disable parallel bringup.
	 */
	if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
		x86_cpuinit.parallel_bringup = false;

	/*
	 * The VMM is capable of injecting interrupt 0x80 and triggering the
	 * compatibility syscall path.
	 *
	 * By default, the 32-bit emulation is disabled in order to ensure
	 * the safety of the VM.
	 */
	if (sev_status & MSR_AMD64_SEV_ENABLED)
		ia32_disable();

	/*
	 * Override init functions that scan the ROM region in SEV-SNP guests,
	 * as this memory is not pre-validated and would thus cause a crash.
	 */
	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
		x86_init.mpparse.find_mptable = x86_init_noop;
		x86_init.pci.init_irq = x86_init_noop;
		x86_init.resources.probe_roms = x86_init_noop;

		/*
		 * DMI setup behavior for SEV-SNP guests depends on
		 * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
		 * parsed yet. snp_dmi_setup() will run after that
		 * parsing has happened.
		 */
		x86_init.resources.dmi_setup = snp_dmi_setup;
	}
}

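/*
 * Free the unused part of the .bss..decrypted section, re-encrypting it
 * first when it was mapped decrypted (see the comment below).
 */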
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * If the unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it. Base the
	 * re-encryption on the same condition used for the decryption in
	 * sme_postprocess_startup(). Higher level abstractions, such as
	 * CC_ATTR_MEM_ENCRYPT, aren't necessarily equivalent in a Hyper-V VM
	 * using vTOM, where sme_me_mask is always zero.
	 */
	if (sme_me_mask) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}