// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>
#include <asm/sev.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}
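
/*
 * Note on the gprs[] convention used by {rd,wr}msr_safe_regs() (see
 * arch/x86/lib/msr-reg.S): index 0 is EAX, 1 is ECX (the MSR number),
 * 2 is EDX and 7 is EDI. EDI must hold the 0x9c5a203a "password" that
 * unlocks these K8 northbridge MSRs.
 */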

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *		(Publication # 21266  Issue Date: August 1998)
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

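/*
 * vide() is an empty function used purely as an out-of-line indirect-call
 * target for the K6 erratum timing loop in init_amd_k6() below.
 */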
#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
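
		/*
		 * Timing heuristic: one million indirect calls to the empty
		 * vide() function. A fixed part retires each call/ret pair
		 * in well under 20 TSC ticks, so a total above
		 * 20 * K6_BUG_LOOP indicates the buggy stepping.
		 */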

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
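		/*
		 * Old-style WHCR as programmed below: bit 0 enables write
		 * allocation and the limit, in 4 MB units, goes in the bits
		 * above it. E.g. with 256 MB of RAM:
		 * l = (1 << 0) | ((256 / 4) << 1) = 0x81.
		 */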
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
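		/*
		 * New-style WHCR as programmed below: bit 16 enables write
		 * allocation and bits [31:22] hold the limit in 4 MB units
		 * (hence the 4092 MB cap: 1023 * 4 MB). E.g. with 1024 MB:
		 * l = ((1024 >> 2) << 22) | (1 << 16) = 0x40010000.
		 */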
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR C001_0015 (HWCR) needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs. If the BIOS didn't
	 * enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* The MP check below applies only to secondary CPUs (identify_secondary_cpu()). */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon > model 7 have the MP
	 * capability bit set. It's worth noting that the A5 stepping (662)
	 * of some Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu_llc_id(cpu);

	/*
	 * On multi-fabric platforms (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void bsp_determine_snp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
	cc_vendor = CC_VENDOR_AMD;

	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
		/*
		 * RMP table entry format is not architectural and is defined by the
		 * per-processor PPR. Restrict SNP support to the known CPU models
		 * for which the RMP table entry format is currently defined.
		 */
		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
		    c->x86 >= 0x19 && snp_probe_rmptable_info()) {
			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
		} else {
			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
		}
	}
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;
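		/*
		 * CPUID Fn8000_0005 EDX describes the L1I cache: bits
		 * [31:24] size in KB, bits [23:16] associativity, so
		 * upperbit is the way size. E.g. a 64 KB, 2-way L1I gives
		 * (64 << 10) / 2 = 32768, i.e. a mask over va bits [14:12].
		 */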

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
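			/*
			 * The speculation control code (bugs.c) then engages
			 * SSBD by writing x86_amd_ls_cfg_base |
			 * x86_amd_ls_cfg_ssbd_mask, with no RMW needed.
			 */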
		}
	}

	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;

	case 0x19:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;

	case 0x1a:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x40 ... 0x4f:
		case 0x70 ... 0x7f:
			setup_force_cpu_cap(X86_FEATURE_ZEN5);
			break;
		default:
			goto warn;
		}
		break;

	default:
		break;
	}

	bsp_determine_snp(c);
	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
	 *	      any additional functionality based on it.
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
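		/* CPUID Fn8000_001F EBX[11:6] holds the reduction value. */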
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is CPUID 8000_0007 EDX. Bit 8 set means the TSC runs
	 * at a constant rate across P/T-states and does not stop in deep
	 * C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/*  Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

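			/*
			 * Northbridge Configuration register F0x68 on the
			 * node-0 NB (bus 0, device 0x18, function 0); bits
			 * [18:17] are the extended APIC ID enable bits.
			 */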
			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	early_detect_mem_encrypt(c);

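	/*
	 * If the branch-type-flush IBPB behavior isn't enumerated, probe for
	 * it: on family 0x17, microcode with IBPB already flushes branch type
	 * predictions, and on family 0x19 and later a successful PRED_CMD
	 * write of SBPB means the microcode supports the Selective Branch
	 * Predictor Barrier.
	 */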
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x41 ||
	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x2 ||
	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

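	/*
	 * MSR_AMD64_CPUID_FN_1 mirrors CPUID Fn0000_0001 EDX:ECX; clearing
	 * bit 62 hides ECX[30], the RDRAND feature bit.
	 */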
	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}

static void init_amd_zen_common(void)
{
	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);
}

static bool cpu_has_zenbleed_microcode(void)
{
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

	default:
		return false;
	}

	if (boot_cpu_data.microcode < good_rev)
		return false;

	return true;
}

static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!cpu_has_zenbleed_microcode()) {
		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	init_spectral_chicken(c);
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}

static void init_amd_zen5(struct cpuinfo_x86 *c)
{
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	}

	/*
	 * Save up on some future enablement work and do common Zen
	 * settings.
	 */
	if (c->x86 >= 0x17)
		init_amd_zen_common();

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN5))
		init_amd_zen5(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without a XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrl(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !(boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model <= 0x2f))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
	 * order to be replicated onto them. Regardless, set it here again, if not set,
	 * to protect against any future refactoring/code reorganization which might
	 * miss setting this important bit.
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
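	/*
	 * Fn8000_0006 EAX/EBX describe the L2 TLBs: bits [27:16] hold the
	 * data TLB entry count and bits [11:0] the instruction TLB count,
	 * with EAX covering 2M/4M pages and EBX 4K pages.
	 */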

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};

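/*
 * With X86_FEATURE_BPEXT, each of DR0-DR3 has a companion address-mask
 * MSR: address bits set in the mask are ignored when the hardware
 * compares an access against the breakpoint, so a single breakpoint can
 * cover a naturally aligned region. The per-CPU cache above avoids
 * redundant MSR writes.
 */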
void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);

u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

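	/*
	 * The CPPC highest perf scale tops out at 255; the Zen2/Zen3 parts
	 * listed below (reportedly those with preferred-core ranking)
	 * report a highest perf of 166 instead.
	 */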
	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);

static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

/*
 * Issue a DIV 0/1 insn to clear any division data from previous DIV
 * operations.
 */
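/*
 * The operands pin EDX:EAX to 0 and the divisor ("r"(1), i.e. %2) to 1,
 * so the DIV computes 0/1, cannot fault, and only scrubs the internal
 * divider state.
 */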
void noinstr amd_clear_divider(void)
{
	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
		     :: "a" (0), "d" (0), "r" (1));
}
EXPORT_SYMBOL_GPL(amd_clear_divider);
