/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void	enable_K5_wt_alloc(void);
void	enable_K6_wt_alloc(void);
void	enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);
#endif

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int	hw_clflush_disable = -1;

/* Must *NOT* be BSS or locore will bzero these after setting them */
int	cpu = 0;		/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature = 0;	/* Feature flags */
u_int	cpu_feature2 = 0;	/* Feature flags */
u_int	amd_feature = 0;	/* AMD feature flags */
u_int	amd_feature2 = 0;	/* AMD feature flags */
u_int	amd_pminfo = 0;		/* AMD advanced power management info */
u_int	via_feature_rng = 0;	/* VIA RNG features */
u_int	via_feature_xcrypt = 0;	/* VIA ACE features */
u_int	cpu_high = 0;		/* Highest arg to CPUID */
u_int	cpu_id = 0;		/* Stepping ID */
u_int	cpu_procinfo = 0;	/* HyperThreading Info / Brand Index / CLFLUSH */
u_int	cpu_procinfo2 = 0;	/* Multicore info */
char	cpu_vendor[20] = "";	/* CPU Origin code */
u_int	cpu_vendor_id = 0;	/* CPU vendor ID */
u_int	cpu_clflush_line_size = 32;
u_int	cpu_mon_mwait_flags;	/* MONITOR/MWAIT flags (CPUID.05H.ECX) */
u_int	cpu_mon_min_size;	/* MONITOR minimum range size, bytes */
u_int	cpu_mon_max_size;	/* MONITOR maximum range size, bytes */

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
	&via_feature_rng, 0, "VIA RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
	&via_feature_xcrypt, 0, "VIA xcrypt feature available in CPU");

#ifdef CPU_ENABLE_SSE
u_int	cpu_fxsr;		/* SSE enabled */
u_int	cpu_mxcsr_mask;		/* valid bits in mxcsr */
#endif

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	intr_restore(saveintr);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	register_t saveintr;
	u_char	ccr0;

	saveintr = intr_disable();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	intr_restore(saveintr);
}


/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	register_t saveintr;
	u_char	ccr2;

	saveintr = intr_disable();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	intr_restore(saveintr);
}


/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	register_t saveintr;
	u_char	ccr2, ccr3, ccr4, pcr0;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS does not enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	saveintr = intr_disable();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	intr_restore(saveintr);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if the
	 * L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}
#endif /* I486_CPU */

#ifdef I586_CPU
/*
 * Rise mP6
 */
static void
init_rise(void)
{

	/*
	 * The CMPXCHG8B instruction is always available but hidden.
	 */
	cpu_feature |= CPUID_CX8;
}

/*
 * IDT WinChip C6/2/2A/2B/3
 *
 * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
 */
static void
init_winchip(void)
{
	u_int regs[4];
	uint64_t fcr;

	fcr = rdmsr(0x0107);

	/*
	 * Set ECX8, DSMC, DTLOCK/EDCTLB, EMMX, and ERETSTK and clear DPDC.
	 */
	fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
	fcr &= ~(1ULL << 11);

	/*
	 * Additionally, set EBRPRED, E2MMX and EAMD3D for WinChip 2 and 3.
	 */
	if (CPUID_TO_MODEL(cpu_id) >= 8)
		fcr |= (1 << 12) | (1 << 19) | (1 << 20);

	wrmsr(0x0107, fcr);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
#endif

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}

static void
init_ppro(void)
{
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled if it is not going to be used.
	 */
	apicbase = rdmsr(MSR_APICBASE);
	apicbase &= ~APICBASE_ENABLED;
	wrmsr(MSR_APICBASE, apicbase);
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	register_t	saveintr;
	u_int64_t	bbl_cr_ctl3;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}

/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;
	uint64_t fcr;

	/*
	 * Explicitly enable CX8 and PGE on C3.
	 *
	 * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
	 */
	if (CPUID_TO_MODEL(cpu_id) <= 9)
		fcr = (1 << 1) | (1 << 7);
	else
		fcr = 0;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		val = 0;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
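	/* A PadLock unit was found; set the matching enable bit in the FCR. */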
	if (via_feature_xcrypt != 0)
		fcr |= 1 << 28;

	wrmsr(0x1107, rdmsr(0x1107) | fcr);
}

#endif /* I686_CPU */

#if defined(I586_CPU) || defined(I686_CPU)
static void
init_transmeta(void)
{
	u_int regs[4];

	/* Expose all hidden features. */
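	/* Set every bit in the vendor feature MSR, then re-read the standard CPUID flags. */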
	wrmsr(0x80860004, rdmsr(0x80860004) | ~0UL);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
#endif

/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
void
enable_sse(void)
{
#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
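		/*
		 * CR4_FXSR (OSFXSR) allows fxsave/fxrstor and SSE;
		 * CR4_XMM (OSXMMEXCPT) unmasks SIMD floating-point exceptions.
		 */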
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}

extern int elf32_nxstack;

void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I586_CPU
	case CPU_586:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_CENTAUR:
			init_winchip();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		case CPU_VENDOR_RISE:
			init_rise();
			break;
		}
		break;
#endif
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_INTEL:
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
			break;
#ifdef CPU_ATHLON_SSE_HACK
		case CPU_VENDOR_AMD:
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
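				/*
				 * Clearing bit 15 of HWCR unhides SSE;
				 * re-read CPUID to pick up the flag.
				 */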
				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
			break;
#endif
		case CPU_VENDOR_CENTAUR:
			init_via();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
#ifdef PAE
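		/*
		 * When the CPU reports the NX capability, enable EFER.NXE so
		 * the no-execute page bit can be used under PAE.
		 */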
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
			elf32_nxstack = 1;
		}
#endif
		break;
#endif
	default:
		break;
	}
	enable_sse();

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * 	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;
	/*
	 * Allow the CLFLUSH feature to be disabled manually via the
	 * hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS must flush the L1 cache itself because no PC-98 machine
	 * supports non-Intel CPUs.  Use the wbinvd instruction before a DMA
	 * transfer when need_pre_dma_flush = 1, and the invd instruction
	 * after a DMA transfer when need_post_dma_flush = 1.  If your CPU
	 * upgrade product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option to your kernel configuration file.
	 * This option eliminates the unneeded cache flush instruction(s).
	 */
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;
	register_t	saveintr;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		saveintr = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* Clear the write allocate bit while updating. */

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
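		/* Maxmem is in 4K pages; Maxmem / 16 is the top of memory in 64K units. */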
		if (Maxmem > 0)
		  msr = Maxmem / 16;
		else
		  msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		}
#else
		/*
		 * There is no way to know whether the 15-16M hole exists or not.
		 * Therefore, we disable write allocate for this range.
		 */
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
#endif
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10); /* enable write allocate */
		intr_restore(saveintr);
	}
}

void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	register_t	saveintr;

	saveintr = intr_disable();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
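	/* Maxmem is in 4K pages; convert to 4M units, rounding up. */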
	if (Maxmem > 0)
	  size = ((Maxmem >> 8) + 3) >> 2;
	else
	  size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
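	/* The write-allocate limit lives in WHCR bits 7:1, in 4M units. */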
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |=  0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	intr_restore(saveintr);
}

void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	register_t	saveintr;

	saveintr = intr_disable();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
	  size = ((Maxmem >> 8) + 3) >> 2;
	else
	  size = 0;

	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
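	/* On these models the limit field is WHCR bits 31:22, still in 4M units. */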
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
		else
#endif
			whcr |=  1LL << 16;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0x0c0000082, whcr);

	intr_restore(saveintr);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	register_t saveintr;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		saveintr = intr_disable();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);		/* Restore CCR3. */
		}
		intr_restore(saveintr);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
#endif /* DDB */