// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))

/*
 * Special hack: we have to be careful, because no indirections are
 * allowed here, and paravirt_ops is a kind of one. As it will only run
 * on bare metal anyway, we just keep it from happening. (This list needs
 * to be extended when new paravirt and debugging variants are added.)
 */
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS

/*
 * This code runs before CPU feature bits are set. By default, the
 * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
 * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
 * is provided to handle this situation and, instead, use a variable that
 * has been set by the early boot code.
 */
#define USE_EARLY_PGTABLE_L5

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
#include <linux/cc_platform.h>

#include <asm/init.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/coco.h>
#include <asm/sev.h>

#include "mm_internal.h"

#define PGD_FLAGS		_KERNPG_TABLE_NOENC
#define P4D_FLAGS		_KERNPG_TABLE_NOENC
#define PUD_FLAGS		_KERNPG_TABLE_NOENC
#define PMD_FLAGS		_KERNPG_TABLE_NOENC

#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
				 (_PAGE_PAT_LARGE | _PAGE_PWT))

#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)

#define PTE_FLAGS_DEC		PTE_FLAGS
#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))

#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)

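/*
 * State used while building the temporary pagetables: the root of the
 * hierarchy being populated, the next free byte of the pagetable
 * allocation area, the flags for new PMD/PTE entries, and the
 * physical/virtual range currently being mapped.
 */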
struct sme_populate_pgd_data {
	void    *pgtable_area;
	pgd_t   *pgd;

	pmdval_t pmd_flags;
	pteval_t pte_flags;
	unsigned long paddr;

	unsigned long vaddr;
	unsigned long vaddr_end;
};

/*
 * This work area lives in the .init.scratch section, which lives outside of
 * the kernel proper. It is sized to hold the intermediate copy buffer and
 * more than enough pagetable pages.
 *
 * By using this section, the kernel can be encrypted in place and it
 * avoids any possibility of boot parameters or initramfs images being
 * placed such that the in-place encryption logic overwrites them.  This
 * section is 2MB aligned to allow for simple pagetable setup using only
 * PMD entries (see vmlinux.lds.S).
 */
static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");

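/* Clear the PGD entries mapping ppd->vaddr through ppd->vaddr_end. */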
static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}

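/*
 * Walk the pagetable hierarchy for ppd->vaddr, allocating intermediate
 * table pages from ppd->pgtable_area as needed.  Returns the PUD entry
 * under which the mapping will be made, or NULL if a leaf PUD already
 * covers the address.
 */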
static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	if (pud_leaf(*pud))
		return NULL;

	return pud;
}

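/* Map ppd->paddr at ppd->vaddr using a 2MB (large page) PMD entry. */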
static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_leaf(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}

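/*
 * Map ppd->paddr at ppd->vaddr using a 4KB PTE entry, allocating a
 * page of PTEs first if the PMD entry is empty.
 */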
static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	if (pmd_leaf(*pmd))
		return;

	pte = pte_offset_kernel(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}

static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

		ppd->vaddr += PMD_SIZE;
		ppd->paddr += PMD_SIZE;
	}
}

static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd(ppd);

		ppd->vaddr += PAGE_SIZE;
		ppd->paddr += PAGE_SIZE;
	}
}

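/*
 * Map the range [ppd->vaddr, ppd->vaddr_end): a 2MB-unaligned head and
 * tail are mapped with 4KB PTE entries, everything in between with 2MB
 * PMD entries.
 */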
static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}

static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}

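/*
 * Return a conservative upper bound, in bytes, on the pagetable pages
 * needed to map 'len' bytes using the routines above.
 */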
static unsigned long __head sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portion of the address range
	 * that fall outside of the 2MB alignment.  This results in, at most,
	 * two extra pages to hold PTE entries for each range that is mapped.
	 * Incrementing the count for each covers the case where the addresses
	 * cross entries.
	 */

	/* PGDIR_SIZE is equal to P4D_SIZE on a 4-level machine. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */

	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}

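/*
 * Encrypt the kernel (and initrd, if present) in place.  Temporary
 * pagetables map the affected physical memory twice - once encrypted
 * and once decrypted/write-protected - and sme_encrypt_execute() then
 * rewrites the data through the intermediate copy buffer so that it
 * ends up encrypted under the SME mask.
 */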
void __head sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	/*
	 * This is early code; use an open-coded check for SME instead of
	 * using cc_platform_has(). This eliminates worries about removing
	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
	 * function.
	 */
	if (!sme_get_me_mask() ||
	    RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the attributes needed to encrypt the kernel
	 * in place.
	 *
	 *   One range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as encrypted.
	 *
	 *   Another range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as decrypted and write-protected.
	 *
	 *     The use of the write-protect attribute will prevent any of
	 *     the memory from being cached.
	 */

	kernel_start = (unsigned long)RIP_REL_REF(_text);
	kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/*
	 * Calculate the number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);

		/* Add decrypted, write-protected initrd (non-identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted.  Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}

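/*
 * Determine, via CPUID and the SEV/SYSCFG MSRs, whether SME or SEV is
 * active and, if so, record the encryption mask for use by the rest of
 * the kernel.
 */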
void __head sme_enable(struct boot_params *bp)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	unsigned long me_mask;
	bool snp;
	u64 msr;

	snp = snp_init(bp);

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV or SME is supported */
	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return;

	me_mask = 1UL << (ebx & 0x3f);

	/* Check the SEV MSR to see whether SEV or SME is enabled */
	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;

	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
	if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
		snp_abort();

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		if (!(bp->hdr.xloadflags & XLF_MEM_ENCRYPTION))
			return;

		/*
		 * No SME if the Hypervisor bit is set. This check is here to
		 * prevent a guest from trying to enable SME. For running as a
		 * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
		 * might be other hypervisors which emulate that MSR as non-zero
		 * or even pass it through to the guest.
		 * A malicious hypervisor can still trick a guest into this
		 * path, but there is no way to protect against that.
		 */
		eax = 1;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (ecx & BIT(31))
			return;

		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_AMD64_SYSCFG);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			return;
	}

	RIP_REL_REF(sme_me_mask) = me_mask;
	physical_mask &= ~me_mask;
	cc_vendor = CC_VENDOR_AMD;
	cc_set_mask(me_mask);
}