1/*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 *    may be used to endorse or promote products derived from this software
16 *    without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD: releng/11.1/sys/amd64/include/cpufunc.h 313148 2017-02-03 12:03:10Z kib $
31 */
32
33/*
34 * Functions to provide access to special i386 instructions.
35 * This in included in sys/systm.h, and that file should be
36 * used in preference to this.
37 */
38
39#ifndef _MACHINE_CPUFUNC_H_
40#define	_MACHINE_CPUFUNC_H_
41
42struct region_descriptor;
43
/*
 * Width-specific volatile loads from virtual address va.  The volatile
 * cast forces the compiler to emit exactly one load of the named width
 * on every use (suitable for memory-mapped device registers).
 */
#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

/* Width-specific volatile stores of d to virtual address va. */
#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
53
54#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)
55
/* Trap into the debugger / breakpoint handler (INT3). */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

/*
 * Bit-scan forward, 32-bit: index of the least significant set bit of
 * mask.  Per the BSF instruction the destination is undefined when
 * mask == 0; callers must check for zero first (as ffsl() etc. do).
 */
static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/* Bit-scan forward, 64-bit variant; result undefined for mask == 0. */
static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/*
 * Bit-scan reverse, 32-bit: index of the most significant set bit of
 * mask; result undefined for mask == 0 (see BSR in the SDM).
 */
static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/* Bit-scan reverse, 64-bit variant; result undefined for mask == 0. */
static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}
97
/* Flush the cache line containing linear address addr (CLFLUSH). */
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

/*
 * CLFLUSHOPT: optimized cache-line flush with weaker ordering than
 * CLFLUSH.  Hand-encoded (0x66 prefix + CLFLUSH) so it assembles even
 * with toolchains that do not know the mnemonic.
 */
static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

/* Clear the task-switched flag, CR0.TS (CLTS). */
static __inline void
clts(void)
{

	__asm __volatile("clts");
}
118
/*
 * Disable maskable interrupts (CLI).  The "memory" clobber acts as a
 * compiler barrier so memory accesses are not moved out of the
 * interrupt-disabled region.
 */
static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

/* Execute CPUID for leaf ax; store %eax/%ebx/%ecx/%edx in p[0..3]. */
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

/* CPUID for leaf ax with subleaf cx; results stored in p[0..3]. */
static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}

/* Re-enable maskable interrupts (STI). */
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}
146
147#ifdef _KERNEL
148
149#define	HAVE_INLINE_FFS
150#define        ffs(x)  __builtin_ffs(x)
151
152#define	HAVE_INLINE_FFSL
153
154static __inline int
155ffsl(long mask)
156{
157	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
158}
159
160#define	HAVE_INLINE_FFSLL
161
/* Find-first-set for long long; long is also 64 bits on amd64. */
static __inline int
ffsll(long long mask)
{
	long wide;

	wide = (long)mask;
	return (ffsl(wide));
}
167
168#define	HAVE_INLINE_FLS
169
170static __inline int
171fls(int mask)
172{
173	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
174}
175
176#define	HAVE_INLINE_FLSL
177
178static __inline int
179flsl(long mask)
180{
181	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
182}
183
184#define	HAVE_INLINE_FLSLL
185
/* Find-last-set for long long; long is also 64 bits on amd64. */
static __inline int
flsll(long long mask)
{
	long wide;

	wide = (long)mask;
	return (flsl(wide));
}
191
192#endif /* _KERNEL */
193
/* Halt the CPU until the next interrupt (HLT). */
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * Read one byte from I/O port.  The "Nd" constraint allows either an
 * immediate port number (0-255) or the port in %dx.
 */
static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Read one 32-bit longword from I/O port. */
static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/*
 * String input: read count bytes from I/O port into the buffer at
 * addr (CLD; REP INSB).  The "memory" clobber tells the compiler the
 * buffer is written through %rdi behind its back.
 */
static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* String input of count 16-bit words from I/O port into addr. */
static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* String input of count 32-bit longwords from I/O port into addr. */
static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/*
 * Invalidate all caches WITHOUT writing dirty lines back (INVD).
 * Unlike wbinvd() this discards modified data; use with extreme care.
 */
static __inline void
invd(void)
{
	__asm __volatile("invd");
}

/* Read one 16-bit word from I/O port. */
static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}
259
/* Write one byte to I/O port. */
static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

/* Write one 32-bit longword to I/O port. */
static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

/*
 * String output: write count bytes from addr to I/O port
 * (CLD; REP OUTSB).  The buffer is only read, so no "memory" clobber
 * is needed here, unlike the ins*() family.
 */
static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* String output of count 16-bit words from addr to I/O port. */
static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* String output of count 32-bit longwords from addr to I/O port. */
static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* Write one 16-bit word to I/O port. */
static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
301
/*
 * Count the set bits in mask (POPCNT, 64-bit).  NOTE(review): the
 * POPCNT instruction requires CPU support (CPUID.01H:ECX.POPCNT);
 * callers are responsible for checking that the feature is present.
 */
static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/* Load fence: serialize load operations; also a compiler barrier. */
static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

/* Full memory fence: serialize loads and stores; compiler barrier. */
static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

/* Store fence: serialize store operations; also a compiler barrier. */
static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

/* Spin-wait hint for busy loops (PAUSE). */
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

/* Return the current 64-bit RFLAGS register (PUSHFQ/POPQ). */
static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}
346
/*
 * Read the 64-bit model-specific register msr (RDMSR).  The result is
 * delivered by hardware split across %edx:%eax and recombined here.
 */
static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

/*
 * Read only the low 32 bits of MSR msr.  %rdx (the high half) is
 * discarded, hence the clobber.
 */
static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

/* Read 64-bit performance-monitoring counter pmc (RDPMC). */
static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

/* Read the 64-bit time-stamp counter (RDTSC). */
static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

/*
 * Read only the low 32 bits of the time-stamp counter; the high half
 * in %edx is discarded (clobbered).
 */
static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}
391
/* Write back and invalidate all caches (WBINVD), unless overridden. */
#ifndef wbinvd
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}
#endif

/* Load rf into the RFLAGS register (PUSHQ/POPFQ). */
static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

/*
 * Write the 64-bit value newval to model-specific register msr
 * (WRMSR); hardware takes the value split across %edx:%eax.
 */
static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
415
/* Load data into control register CR0. */
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

/* Read control register CR0. */
static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

/* Read CR2 (the faulting linear address after a page fault). */
static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

/*
 * Load data into CR3 (page-table base).  The "memory" clobber keeps
 * memory accesses from being reordered across the address-space
 * switch.
 */
static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

/* Read control register CR3. */
static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

/* Load data into control register CR4. */
static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

/* Read control register CR4. */
static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

/* Read 64-bit extended control register reg (XGETBV, %edx:%eax). */
static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

/* Write val to extended control register reg (XSETBV). */
static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}
490
491/*
492 * Global TLB flush (except for thise for pages marked PG_G)
493 */
494static __inline void
495invltlb(void)
496{
497
498	load_cr3(rcr3());
499}
500
501#ifndef CR4_PGE
502#define	CR4_PGE	0x00000080	/* Page global enable */
503#endif
504
505/*
506 * Perform the guaranteed invalidation of all TLB entries.  This
507 * includes the global entries, and entries in all PCIDs, not only the
508 * current context.  The function works both on non-PCID CPUs and CPUs
509 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
510 * Operations that Invalidate TLBs and Paging-Structure Caches.
511 */
512static __inline void
513invltlb_glob(void)
514{
515	uint64_t cr4;
516
517	cr4 = rcr4();
518	load_cr4(cr4 & ~CR4_PGE);
519	/*
520	 * Although preemption at this point could be detrimental to
521	 * performance, it would not lead to an error.  PG_G is simply
522	 * ignored if CR4.PGE is clear.  Moreover, in case this block
523	 * is re-entered, the load_cr4() either above or below will
524	 * modify CR4.PGE flushing the TLB.
525	 */
526	load_cr4(cr4 | CR4_PGE);
527}
528
529/*
530 * TLB flush for an individual page (even if it has PG_G).
531 * Only works on 486+ CPUs (i386 does not have PG_G).
532 */
533static __inline void
534invlpg(u_long addr)
535{
536
537	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
538}
539
540#define	INVPCID_ADDR	0
541#define	INVPCID_CTX	1
542#define	INVPCID_CTXGLOB	2
543#define	INVPCID_ALLCTX	3
544
545struct invpcid_descr {
546	uint64_t	pcid:12 __packed;
547	uint64_t	pad:52 __packed;
548	uint64_t	addr;
549} __packed;
550
551static __inline void
552invpcid(struct invpcid_descr *d, int type)
553{
554
555	__asm __volatile("invpcid (%0),%1"
556	    : : "r" (d), "r" ((u_long)type) : "memory");
557}
558
/* Return the current %fs segment selector. */
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

/* Return the current %gs segment selector. */
static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

/* Return the current %ss segment selector. */
static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

/* Load selector sel into %ds. */
static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

/* Load selector sel into %es. */
static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

/*
 * Arm address monitoring on addr (MONITOR); extensions and hints go
 * in %rcx/%edx as defined by the instruction.
 */
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

/* Wait for a store to the monitored range or an interrupt (MWAIT). */
static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}
609
#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
/*
 * Kernel version: load a new %fs selector while preserving the FSBASE
 * MSR, which a selector load would otherwise reset.  RDMSR/WRMSR use
 * %eax/%edx, hence the clobbers.
 */
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
/*
 * Kernel version: load a new %gs selector while preserving GSBASE.
 * Interrupts are disabled (PUSHFQ; CLI ... POPFQ) for the duration —
 * see the comment below.
 */
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

/* Userland version: plain %gs selector load, no MSR access. */
static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif
651
/* Load the global descriptor table register from *addr (LGDT). */
static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

/* Store the GDT register into *addr (SGDT). */
static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	/* Cast via char * so the "=m" output covers the stored bytes. */
	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

/* Load the interrupt descriptor table register from *addr (LIDT). */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* Store the IDT register into *addr (SIDT). */
static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

/* Load the local descriptor table register with selector sel (LLDT). */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* Load the task register with selector sel (LTR). */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

/* Return the current task register selector (STR). */
static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}
702
/*
 * Debug register accessors: rdrN() reads and load_drN() writes the
 * hardware debug registers DR0-DR7.  DR0-DR3 hold breakpoint
 * addresses, DR6 is status and DR7 is control; NOTE(review): DR4/DR5
 * are legacy aliases of DR6/DR7 when CR4.DE is clear — see the SDM.
 */
static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr4(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(uint64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline uint64_t
rdr5(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(uint64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}
814
815#ifndef __HAIKU__
816static __inline register_t
817intr_disable(void)
818{
819	register_t rflags;
820
821	rflags = read_rflags();
822	disable_intr();
823	return (rflags);
824}
825
826static __inline void
827intr_restore(register_t rflags)
828{
829	write_rflags(rflags);
830}
831#endif
832
833#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
834
835int	breakpoint(void);
836u_int	bsfl(u_int mask);
837u_int	bsrl(u_int mask);
838void	clflush(u_long addr);
839void	clts(void);
840void	cpuid_count(u_int ax, u_int cx, u_int *p);
841void	disable_intr(void);
842void	do_cpuid(u_int ax, u_int *p);
843void	enable_intr(void);
844void	halt(void);
845void	ia32_pause(void);
846u_char	inb(u_int port);
847u_int	inl(u_int port);
848void	insb(u_int port, void *addr, size_t count);
849void	insl(u_int port, void *addr, size_t count);
850void	insw(u_int port, void *addr, size_t count);
851register_t	intr_disable(void);
852void	intr_restore(register_t rf);
853void	invd(void);
854void	invlpg(u_int addr);
855void	invltlb(void);
856u_short	inw(u_int port);
857void	lidt(struct region_descriptor *addr);
858void	lldt(u_short sel);
859void	load_cr0(u_long cr0);
860void	load_cr3(u_long cr3);
861void	load_cr4(u_long cr4);
862void	load_dr0(uint64_t dr0);
863void	load_dr1(uint64_t dr1);
864void	load_dr2(uint64_t dr2);
865void	load_dr3(uint64_t dr3);
866void	load_dr4(uint64_t dr4);
867void	load_dr5(uint64_t dr5);
868void	load_dr6(uint64_t dr6);
869void	load_dr7(uint64_t dr7);
870void	load_fs(u_short sel);
871void	load_gs(u_short sel);
872void	ltr(u_short sel);
873void	outb(u_int port, u_char data);
874void	outl(u_int port, u_int data);
875void	outsb(u_int port, const void *addr, size_t count);
876void	outsl(u_int port, const void *addr, size_t count);
877void	outsw(u_int port, const void *addr, size_t count);
878void	outw(u_int port, u_short data);
879u_long	rcr0(void);
880u_long	rcr2(void);
881u_long	rcr3(void);
882u_long	rcr4(void);
883uint64_t rdmsr(u_int msr);
884uint32_t rdmsr32(u_int msr);
885uint64_t rdpmc(u_int pmc);
886uint64_t rdr0(void);
887uint64_t rdr1(void);
888uint64_t rdr2(void);
889uint64_t rdr3(void);
890uint64_t rdr4(void);
891uint64_t rdr5(void);
892uint64_t rdr6(void);
893uint64_t rdr7(void);
894uint64_t rdtsc(void);
895u_long	read_rflags(void);
896u_int	rfs(void);
897u_int	rgs(void);
898void	wbinvd(void);
899void	write_rflags(u_int rf);
900void	wrmsr(u_int msr, uint64_t newval);
901
902#endif	/* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */
903
904void	reset_dbregs(void);
905
906#ifdef _KERNEL
907int	rdmsr_safe(u_int msr, uint64_t *val);
908int	wrmsr_safe(u_int msr, uint64_t newval);
909#endif
910
911#endif /* !_MACHINE_CPUFUNC_H_ */
912