/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */
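
/*
 * Usage sketch (illustrative, not part of this header): kernel code
 * normally reaches these helpers through <sys/systm.h> rather than by
 * including this file directly, e.g.:
 *
 *	#include <sys/param.h>
 *	#include <sys/systm.h>
 *
 *	uint64_t tsc = rdtsc();
 */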

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

struct region_descriptor;

#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
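
/*
 * Example (illustrative sketch): access a 32-bit memory-mapped device
 * register; "regs" is a hypothetical kernel virtual address obtained
 * from a bus mapping, not something defined here.
 *
 *	uint32_t status;
 *
 *	status = readl(regs);
 *	writel(regs, status | 0x1);
 */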

#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline __pure2 u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
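
/*
 * Example (illustrative sketch): query the basic CPUID vendor leaf.
 * p[0]..p[3] receive %eax, %ebx, %ecx and %edx, so on an Intel CPU
 * regs[1], regs[3] and regs[2] spell out "Genu", "ineI" and "ntel".
 *
 *	u_int regs[4];
 *
 *	do_cpuid(0, regs);
 */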

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)	__builtin_ffs(x)

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{
	return (ffsl((long)mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
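
/*
 * Example (illustrative sketch): legacy port I/O.  Writing to port 0x80
 * (the POST code port) is the traditional way to insert a short I/O
 * delay; port 0x60 is the PS/2 keyboard controller data port.
 *
 *	u_char data;
 *
 *	outb(0x80, 0);
 *	data = inb(0x60);
 */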

static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}
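
/*
 * Example (illustrative sketch): count TSC ticks around a code region.
 * lfence() is commonly paired with rdtsc() to keep the reads from being
 * reordered around the measured work.
 *
 *	uint64_t start, end, ticks;
 *
 *	lfence();
 *	start = rdtsc();
 *	... measured work ...
 *	end = rdtsc();
 *	lfence();
 *	ticks = end - start;
 */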

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
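
/*
 * Example (illustrative sketch): read-modify-write of an MSR.
 * MSR_EXAMPLE and the bit being set are placeholders, not real
 * register definitions from <machine/specialreg.h>.
 *
 *	uint64_t v;
 *
 *	v = rdmsr(MSR_EXAMPLE);
 *	wrmsr(MSR_EXAMPLE, v | 0x1);
 */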

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}
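
/*
 * Example (illustrative sketch): invalidate a single mapping for one
 * PCID; "pcid" and "va" are hypothetical values supplied by the caller.
 *
 *	struct invpcid_descr d;
 *
 *	d.pcid = pcid;
 *	d.pad = 0;
 *	d.addr = va;
 *	invpcid(&d, INVPCID_ADDR);
 */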

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
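
/*
 * Example (illustrative sketch): the usual save/disable/restore idiom
 * for a short critical section with interrupts off.
 *
 *	register_t saved;
 *
 *	saved = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(saved);
 */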

static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}

#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */

int	breakpoint(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	clflush(u_long addr);
void	clts(void);
void	cpuid_count(u_int ax, u_int cx, u_int *p);
void	disable_intr(void);
void	do_cpuid(u_int ax, u_int *p);
void	enable_intr(void);
void	halt(void);
void	ia32_pause(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t count);
void	insl(u_int port, void *addr, size_t count);
void	insw(u_int port, void *addr, size_t count);
register_t	intr_disable(void);
void	intr_restore(register_t rf);
void	invd(void);
void	invlpg(u_int addr);
void	invltlb(void);
u_short	inw(u_int port);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_dr0(uint64_t dr0);
void	load_dr1(uint64_t dr1);
void	load_dr2(uint64_t dr2);
void	load_dr3(uint64_t dr3);
void	load_dr6(uint64_t dr6);
void	load_dr7(uint64_t dr7);
void	load_fs(u_short sel);
void	load_gs(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t count);
void	outsl(u_int port, const void *addr, size_t count);
void	outsw(u_int port, const void *addr, size_t count);
void	outw(u_int port, u_short data);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
uint64_t rdmsr(u_int msr);
uint32_t rdmsr32(u_int msr);
uint64_t rdpmc(u_int pmc);
uint64_t rdr0(void);
uint64_t rdr1(void);
uint64_t rdr2(void);
uint64_t rdr3(void);
uint64_t rdr6(void);
uint64_t rdr7(void);
uint64_t rdtsc(void);
u_long	read_rflags(void);
u_int	rfs(void);
u_int	rgs(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, uint64_t newval);

#endif	/* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */