/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/amd64/amd64/initcpu.c 331722 2018-03-29 02:50:57Z eadler $");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
static int	lower_sharedpage_init;
int		hw_lower_amd64_sharedpage;
SYSCTL_INT(_hw, OID_AUTO, lower_amd64_sharedpage, CTLFLAG_RDTUN,
    &hw_lower_amd64_sharedpage, 0,
    "Lower sharedpage to work around Ryzen issue with executing code near the top of user memory");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int	hw_clflush_disable = -1;

static void
init_amd(void)
{
	uint64_t msr;

	/*
	 * Work around Erratum 721 for Family 10h and 12h processors.
	 * These processors may incorrectly update the stack pointer
	 * after a long series of push and/or near-call instructions,
	 * or a long series of pop and/or near-return instructions.
	 *
	 * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
	 * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
	 *
	 * Hypervisors do not provide access to the errata MSR, so an
	 * attempt to apply the workaround raises a #GP exception.  The
	 * MSR write has to be done on the host and persists globally
	 * anyway, so do not try it when running under virtualization.
	 */
	switch (CPUID_TO_FAMILY(cpu_id)) {
	case 0x10:
	case 0x12:
		if ((cpu_feature2 & CPUID2_HV) == 0)
			wrmsr(0xc0011029, rdmsr(0xc0011029) | 1);
		break;
	}

	/*
	 * BIOS may fail to set InitApicIdCpuIdLo to 1 as it should per BKDG.
	 * So do it here; otherwise some tools could be confused by the
	 * Initial Local APIC ID reported with CPUID Function 1 in EBX.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(MSR_NB_CFG1);
			msr |= (uint64_t)1 << 54;
			wrmsr(MSR_NB_CFG1, msr);
		}
	}

	/*
	 * BIOS may configure Family 10h processors to convert WC+ cache type
	 * to CD.  That can hurt performance of guest VMs using nested paging.
	 * The relevant MSR bit is not documented in the BKDG;
	 * the fix is borrowed from Linux.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(0xc001102a);
			msr &= ~((uint64_t)1 << 24);
			wrmsr(0xc001102a, msr);
		}
	}

	/*
	 * Work around Erratum 793: Specific Combination of Writes to Write
	 * Combined Memory Types and Locked Instructions May Cause Core Hang.
	 * See Revision Guide for AMD Family 16h Models 00h-0Fh Processors,
	 * revision 3.04 or later, publication 51810.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(0xc0011020);
			msr |= (uint64_t)1 << 15;
			wrmsr(0xc0011020, msr);
		}
	}

	/*
	 * Work around a problem on Ryzen that is triggered by executing
	 * code near the top of user memory, in our case the signal
	 * trampoline code in the shared page on amd64.
	 *
	 * This function is executed once for the BSP before tunables take
	 * effect so the value determined here can be overridden by the
	 * tunable.  This function is then executed again for each AP and
	 * also on resume.  Set a flag the first time so that the value set by
	 * the tunable is not overwritten.
	 *
	 * The stepping and/or microcode versions should be checked after
	 * this issue is fixed by AMD so that we don't use this mode if not
	 * needed.
	 */
	if (lower_sharedpage_init == 0) {
		lower_sharedpage_init = 1;
		if (CPUID_TO_FAMILY(cpu_id) == 0x17) {
			hw_lower_amd64_sharedpage = 1;
		}
	}
}

/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		return;

	/* Enable RNG if present. */
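	/* MSR 0x110B is the VIA RNG configuration MSR. */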
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
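	/*
	 * MSR 0x1107 is the VIA Feature Control Register (FCR); bit 28
	 * enables the ACE/PadLock units per the PadLock programming guide.
	 */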
	if (via_feature_xcrypt != 0)
		wrmsr(0x1107, rdmsr(0x1107) | (1 << 28));
}

/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;
	uint32_t cr4;

	cr4 = rcr4();
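	/* Enable OS support for FXSAVE/FXRSTOR and SSE exceptions if present. */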
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		cr4 |= CR4_FXSR | CR4_XMM;
		cpu_fxsr = hw_instruction_sse = 1;
	}
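	/* Allow the RDFSBASE/WRFSBASE and RDGSBASE/WRGSBASE instructions. */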
	if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
		cr4 |= CR4_FSGSBASE;

	/*
	 * Postpone enabling SMEP on the boot CPU until the page
	 * tables are switched from the boot loader identity mapping
	 * to the kernel tables.  The boot loader enables the U bit in
	 * its tables.
	 */
	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
		cr4 |= CR4_SMEP;
	load_cr4(cr4);
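	/* Enable no-execute (NX) page protection if the CPU supports it. */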
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
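	/*
	 * Recompute the IBRS (indirect branch restricted speculation)
	 * setting used for the Spectre V2 mitigation.
	 */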
	hw_ibrs_recalculate();
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		init_amd();
		break;
	case CPU_VENDOR_CENTAUR:
		init_via();
		break;
	}
}

void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * 	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing the APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}

	/*
	 * The kernel's use of CLFLUSH{,OPT} can be disabled manually
	 * by setting the hw.clflush_disable tunable.
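	 * For example, set hw.clflush_disable=1 in /boot/loader.conf.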
	 */
	if (hw_clflush_disable == 1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}
}