/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/i386/i386/support.s 327959 2018-01-14 09:29:06Z kib $
 */

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

#define IDXSHIFT	10

	.text

/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 */
ENTRY(bzero)
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax
	shrl	$2,%ecx
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx
	rep
	stosb
	popl	%edi
	ret
END(bzero)

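/*
 * Page-zeroing helpers.  Their C prototypes are not given here in the
 * original; they are assumed to be (as declared for this platform,
 * typically in <machine/md_var.h>):
 *
 *	void sse2_pagezero(void *addr);
 *	void i686_pagezero(void *addr);
 *
 * sse2_pagezero() zeroes the page with non-temporal stores (movnti)
 * followed by an sfence, so the freshly zeroed page does not displace
 * useful data from the cache.
 */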
ENTRY(sse2_pagezero)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	%ecx,%eax
	addl	$4096,%eax
	xor	%ebx,%ebx
1:
	movnti	%ebx,(%ecx)
	addl	$4,%ecx
	cmpl	%ecx,%eax
	jne	1b
	sfence
	popl	%ebx
	ret
END(sse2_pagezero)

ENTRY(i686_pagezero)
	pushl	%edi
	pushl	%ebx

	movl	12(%esp),%edi
	movl	$1024,%ecx
	cld

	ALIGN_TEXT
1:
	xorl	%eax,%eax
	repe
	scasl
	jnz	2f

	popl	%ebx
	popl	%edi
	ret

	ALIGN_TEXT

2:
	incl	%ecx
	subl	$4,%edi

	movl	%ecx,%edx
	cmpl	$16,%ecx

	jge	3f

	movl	%edi,%ebx
	andl	$0x3f,%ebx
	shrl	%ebx
	shrl	%ebx
	movl	$16,%ecx
	subl	%ebx,%ecx

3:
	subl	%ecx,%edx
	rep
	stosl

	movl	%edx,%ecx
	testl	%edx,%edx
	jnz	1b

	popl	%ebx
	popl	%edi
	ret
END(i686_pagezero)

/* fillw(pat, base, cnt) */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	cld
	rep
	stosw
	popl	%edi
	ret
END(fillw)

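/*
 * bcopyb(src, dst, cnt)
 *	Byte-granular copy that, like bcopy() below, copies backwards when
 *	the destination overlaps the source.
 */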
ENTRY(bcopyb)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f
	cld					/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards. */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	std
	rep
	movsb
	popl	%edi
	popl	%esi
	cld
	ret
END(bcopyb)

/*
 * bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%esi
	pushl	%edi
	movl	8(%ebp),%esi
	movl	12(%ebp),%edi
	movl	16(%ebp),%ecx

	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f

	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	16(%ebp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	popl	%ebp
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	andl	$3,%ecx				/* any fractional bytes? */
	std
	rep
	movsb
	movl	16(%ebp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld
	popl	%ebp
	ret
END(bcopy)

/*
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%ecx
	movl	%edi,%eax			/* return value is dst */
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%esi
	popl	%edi
	ret
END(memcpy)

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines and possibly
 * the math- and DOS emulators should be the only places that do this.
 *
 * We have to access the memory with user's permissions, so use a segment
 * selector with RPL 3. For writes to user space we have to additionally
 * check the PTE for write permission, because the 386 does not check
 * write permissions when we are executing with EPL 0. The 486 does check
 * this if the WP bit is set in CR0, so we can use a simpler version here.
 *
 * These routines set curpcb->pcb_onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->pcb_onfault instead of the function.
 */
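
/*
 * Illustration (not part of the original file): the recovery path depends
 * on the trap handler in trap.c noticing that pcb_onfault is set for a
 * fault taken in kernel mode and redirecting control there instead of
 * panicking.  Roughly, and much simplified:
 *
 *	if (td->td_pcb->pcb_onfault != NULL) {
 *		frame->tf_eip = (int)td->td_pcb->pcb_onfault;
 *		return;
 *	}
 *
 * Each routine below then resumes at its fault label, clears pcb_onfault
 * and returns EFAULT to its caller.
 */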

/*
 * copyout(from_kernel, to_user, len)  - MP SAFE
 */
ENTRY(copyout)
	movl	PCPU(CURPCB),%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ebx
	testl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  This check is essential
	 * because it prevents usermode from writing into the kernel.  We do
	 * not verify anywhere else that the user did not specify a rogue
	 * address.
	 */
	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

	/* bcopy(%esi, %edi, %ebx) */
	movl	%ebx,%ecx

	shrl	$2,%ecx
	cld
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret
END(copyout)

	ALIGN_TEXT
copyout_fault:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 */
ENTRY(copyin)
	movl	PCPU(CURPCB),%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault

	movb	%cl,%al
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret
END(copyin)

	ALIGN_TEXT
copyin_fault:
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

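/*
 * Typical C-side usage (illustrative; the names are hypothetical): both
 * routines return 0 on success and EFAULT on a fault or a rogue address,
 * and callers are expected to propagate that error, e.g.
 *
 *	error = copyin(uap->buf, &kbuf, sizeof(kbuf));
 *	if (error != 0)
 *		return (error);
 */
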
/*
 * casueword.  Compare and set user word.  Returns -1 on fault,
 * 0 on non-faulting access.  The current value is in *oldp.
 */
ALTENTRY(casueword32)
ENTRY(casueword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx			/* dst */
	movl	8(%esp),%eax			/* old */
	movl	16(%esp),%ecx			/* new */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

#ifdef SMP
	lock
#endif
	cmpxchgl %ecx,(%edx)			/* Compare and set. */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	12(%esp),%edx			/* oldp */
	movl	%eax,(%edx)
	xorl	%eax,%eax
	ret
END(casueword32)
END(casueword)
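
/*
 * Assumed C prototypes for the two entry points above (declared in
 * sys/systm.h in this tree):
 *
 *	int casueword32(volatile uint32_t *base, uint32_t oldval,
 *	    uint32_t *oldvalp, uint32_t newval);
 *	int casueword(volatile u_long *base, u_long oldval,
 *	    u_long *oldvalp, u_long newval);
 *
 * A caller that needs a successful compare-and-set, not merely a
 * non-faulting access, inspects *oldvalp and retries (hypothetical,
 * illustrative caller):
 *
 *	if (casueword32(uaddr, old, &cur, new) == -1)
 *		return (EFAULT);
 *	if (cur != old)
 *		... the word changed underneath us; recompute and retry ...
 */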

/*
 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
 * memory.
 */

ALTENTRY(fueword32)
ENTRY(fueword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	movl	8(%esp),%edx
	movl	%eax,(%edx)
	xorl	%eax,%eax
	ret
END(fueword32)
END(fueword)

/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16(), respectively.  They are called from the profiling code,
 * potentially at interrupt time.  If they fail, that's okay; good things
 * will happen later.  They always fail for now, until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movl	$-1,%eax
	ret
END(suswintr)
END(fuswintr)

ENTRY(fuword16)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	fusufault

	movzwl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret
END(fuword16)

ENTRY(fubyte)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	fusufault

	movzbl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret
END(fubyte)

	ALIGN_TEXT
fusufault:
	movl	PCPU(CURPCB),%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	decl	%eax
	ret

/*
 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
 * All these functions are MPSAFE.
 */

ALTENTRY(suword32)
ENTRY(suword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax
	movl	%eax,(%edx)
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret
END(suword32)
END(suword)

ENTRY(suword16)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
	movl	%eax,PCB_ONFAULT(%ecx)
	ret
END(suword16)

ENTRY(subyte)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
	movl	%eax,PCB_ONFAULT(%ecx)
	ret
END(subyte)
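
/*
 * Return conventions for the fetch/store family: the su*() routines and
 * fueword()/fueword32() return 0 on success and -1 on a fault (via
 * fusufault above), while fubyte() and fuword16() return the fetched value
 * itself, or -1 on a fault.  That ambiguity is why fueword() exists: it
 * returns the value through a pointer so -1 stays distinguishable from an
 * error.  A typical (illustrative) caller:
 *
 *	if (suword32(uaddr, val) == -1)
 *		return (EFAULT);
 */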

/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *
 *	copy a string from 'from' to 'to', stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	PCPU(CURPCB),%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */

	movl	$VM_MAXUSER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:
	incl	%edx
	cld

2:
	decl	%edx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	cpystrflt
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	1f
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret
END(copyinstr)
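
/*
 * Illustrative C-side use of copyinstr() (hypothetical caller names),
 * following the interface described above:
 *
 *	char path[MAXPATHLEN];
 *	size_t len;
 *	int error;
 *
 *	error = copyinstr(uap->path, path, sizeof(path), &len);
 *	if (error != 0)
 *		return (error);
 *
 * error is 0 on success, EFAULT on a fault, or ENAMETOOLONG if the string
 * did not fit, exactly as arranged above.
 */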

/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx
	cld
1:
	decl	%edx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret
END(copystr)

ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx

	movl	%edx,%ecx
	shrl	$2,%ecx
	cld					/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx
	repe
	cmpsb
1:
	setne	%al
	movsbl	%al,%eax
	popl	%esi
	popl	%edi
	ret
END(bcmp)
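
/*
 * bcmp(b1, b2, len) returns 0 if the two buffers compare equal and a
 * non-zero value otherwise; it reports only equality, not an ordering,
 * as the setne above makes explicit.
 */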

/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%gs
	movl	%eax,%ss
	movl	$KPSEL,%eax
	movl	%eax,%fs

	/* reload code selector by turning return into intersegmental return */
	movl	(%esp),%eax
	pushl	%eax
	movl	$KCSEL,4(%esp)
	MEXITCOUNT
	lret
END(lgdt)

/* ssdtosd(*ssdp,*sdp) */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx
	movl	%edx,(%ecx)
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret
END(ssdtosd)

/* void reset_dbregs() */
ENTRY(reset_dbregs)
	movl	$0,%eax
	movl	%eax,%dr7	/* disable all breakpoints first */
	movl	%eax,%dr0
	movl	%eax,%dr1
	movl	%eax,%dr2
	movl	%eax,%dr3
	movl	%eax,%dr6
	ret
END(reset_dbregs)

/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)

ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
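
/*
 * These are the kernel-internal setjmp()/longjmp() (used, for example, by
 * ddb), not the libc versions.  The jmp_buf layout implied by the offsets
 * above is %ebx, %esp, %ebp, %esi, %edi and %eip at offsets 0, 4, 8, 12,
 * 16 and 20; longjmp() always returns 1 at the saved %eip.
 */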

/*
 * Support for reading MSRs in the safe manner.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movl	PCPU(CURPCB),%ecx
	movl	$msr_onfault,PCB_ONFAULT(%ecx)

	movl	4(%esp),%ecx
	rdmsr
	movl	8(%esp),%ecx
	movl	%eax,(%ecx)
	movl	%edx,4(%ecx)
	xorl	%eax,%eax

	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)

	ret

/*
 * Support for writing MSRs in the safe manner.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movl	PCPU(CURPCB),%ecx
	movl	$msr_onfault,PCB_ONFAULT(%ecx)

	movl	4(%esp),%ecx
	movl	8(%esp),%eax
	movl	12(%esp),%edx
	wrmsr
	xorl	%eax,%eax

	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)

	ret

/*
 * MSR operations fault handler
 */
	ALIGN_TEXT
msr_onfault:
	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	$EFAULT,%eax
	ret
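
/*
 * Illustrative use of the safe MSR accessors (hypothetical caller).  A #GP
 * raised by rdmsr/wrmsr on an unimplemented MSR is caught through
 * pcb_onfault and msr_onfault above, so the call returns EFAULT instead of
 * panicking:
 *
 *	uint64_t val;
 *
 *	if (rdmsr_safe(msr, &val) != 0)
 *		return (ENXIO);
 */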