/* support.s revision 215854 */
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/support.s 215854 2010-11-26 08:11:43Z uqs $
 */
31
32#include "opt_npx.h"
33
34#include <machine/asmacros.h>
35#include <machine/cputypes.h>
36#include <machine/intr_machdep.h>
37#include <machine/pmap.h>
38#include <machine/specialreg.h>
39
40#include "assym.s"
41
#define IDXSHIFT	10		/* not referenced in this fragment */

	.data
	ALIGN_DATA
	/*
	 * Interrupt counter array: INTRCNT_COUNT 32-bit slots.  The
	 * eintrcnt label marks the end of the array.
	 */
	.globl	intrcnt, eintrcnt
intrcnt:
	.space	INTRCNT_COUNT * 4
eintrcnt:

	/*
	 * Parallel array of interrupt names: one (MAXCOMLEN + 1)-byte
	 * slot per counter, ended by the eintrnames label.
	 */
	.globl	intrnames, eintrnames
intrnames:
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1)
eintrnames:

	.text
57
/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 *
 * Zero len bytes at buf: clear len/4 longwords with rep stosl, then
 * the remaining 0-3 bytes with rep stosb.
 */
ENTRY(bzero)
	pushl	%edi
	movl	8(%esp),%edi			/* %edi = buf */
	movl	12(%esp),%ecx			/* %ecx = len */
	xorl	%eax,%eax			/* store pattern: zero */
	shrl	$2,%ecx				/* longword count */
	cld
	rep
	stosl
	movl	12(%esp),%ecx			/* reload len ... */
	andl	$3,%ecx				/* ... for the 0-3 trailing bytes */
	rep
	stosb
	popl	%edi
	ret
END(bzero)
78
/*
 * void sse2_pagezero(void *addr)
 *
 * Zero one 4096-byte page using SSE2 non-temporal stores (movnti),
 * avoiding pollution of the caches with the zeroed page.
 */
ENTRY(sse2_pagezero)
	pushl	%ebx
	movl	8(%esp),%ecx			/* %ecx = current store pointer */
	movl	%ecx,%eax
	addl	$4096,%eax			/* %eax = end of page */
	xor	%ebx,%ebx			/* %ebx = zero source operand */
1:
	movnti	%ebx,(%ecx)			/* non-temporal 32-bit store */
	addl	$4,%ecx
	cmpl	%ecx,%eax
	jne	1b
	sfence					/* order the NT stores before return */
	popl	%ebx
	ret
END(sse2_pagezero)
94
/*
 * void i686_pagezero(void *addr)
 *
 * Zero one 4K page (1024 longwords), but first scan for longwords that
 * are already zero and skip over them, so cache lines holding zeros are
 * not dirtied needlessly.  Assumes addr is page aligned (the 64-byte
 * line arithmetic at label 3 relies on it -- NOTE(review): confirm
 * callers only pass page-aligned addresses).
 */
ENTRY(i686_pagezero)
	pushl	%edi
	pushl	%ebx

	movl	12(%esp),%edi			/* %edi = addr */
	movl	$1024,%ecx			/* %ecx = longwords remaining */
	cld

	ALIGN_TEXT
1:
	xorl	%eax,%eax
	repe
	scasl					/* scan forward while *%edi == 0 */
	jnz	2f				/* hit a non-zero longword */

	/* The whole remainder was already zero; done. */
	popl	%ebx
	popl	%edi
	ret

	ALIGN_TEXT

2:
	incl	%ecx				/* undo scasl's consumption of ... */
	subl	$4,%edi				/* ... the mismatching longword */

	movl	%ecx,%edx			/* %edx = longwords remaining */
	cmpl	$16,%ecx

	jge	3f				/* >= 16 left: just zero the rest */

	/*
	 * Fewer than 16 longwords remain: zero only to the end of the
	 * current 64-byte line, then resume scanning.
	 */
	movl	%edi,%ebx
	andl	$0x3f,%ebx			/* byte offset within the line */
	shrl	%ebx
	shrl	%ebx				/* -> longword offset within line */
	movl	$16,%ecx
	subl	%ebx,%ecx			/* longwords left in this line */

3:
	subl	%ecx,%edx			/* account for what we write now */
	rep
	stosl					/* zero %ecx longwords */

	movl	%edx,%ecx
	testl	%edx,%edx
	jnz	1b				/* more left: resume scanning */

	popl	%ebx
	popl	%edi
	ret
END(i686_pagezero)
145
/*
 * fillw(pat, base, cnt)
 *
 * Store cnt copies of the 16-bit pattern pat starting at base.
 */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax			/* %ax = pattern */
	movl	12(%esp),%edi			/* %edi = base */
	movl	16(%esp),%ecx			/* %ecx = word count */
	cld
	rep
	stosw
	popl	%edi
	ret
END(fillw)
158
/*
 * bcopyb(src, dst, cnt)
 *
 * Byte-by-byte copy that handles overlapping regions: when the
 * destination overlaps the source from above, copy backwards.
 */
ENTRY(bcopyb)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* %esi = src */
	movl	16(%esp),%edi			/* %edi = dst */
	movl	20(%esp),%ecx			/* %ecx = cnt */
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f
	cld					/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards. */
	addl	%ecx,%esi
	decl	%edi				/* start at the last byte */
	decl	%esi
	std					/* count down */
	rep
	movsb
	popl	%edi
	popl	%esi
	cld					/* restore direction flag */
	ret
END(bcopyb)
190
/*
 * bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 *
 * Longword-wise copy that handles overlapping regions: when the
 * destination overlaps the source from above, copy backwards (the 0-3
 * trailing bytes first, then the longwords).
 */
ENTRY(bcopy)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* %esi = src */
	movl	16(%esp),%edi			/* %edi = dst */
	movl	20(%esp),%ecx			/* %ecx = cnt */

	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f

	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	decl	%edi				/* start at the last byte */
	decl	%esi
	andl	$3,%ecx				/* any fractional bytes? */
	std					/* copy the trailing bytes first */
	rep
	movsb
	movl	20(%esp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi				/* step back to the last longword */
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld					/* restore direction flag */
	ret
END(bcopy)
240
/*
 * memcpy(dst, src, cnt)
 *
 * Note: memcpy does not support overlapping copies.  Returns dst in
 * %eax per the C library contract.
 */
ENTRY(memcpy)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi			/* %edi = dst */
	movl	16(%esp),%esi			/* %esi = src */
	movl	20(%esp),%ecx			/* %ecx = cnt */
	movl	%edi,%eax			/* return value = dst */
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* always copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%esi
	popl	%edi
	ret
END(memcpy)
263
/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines and possibly
 * the math- and DOS emulators should be the only places that do this.
 *
 * We have to access the memory with user's permissions, so use a segment
 * selector with RPL 3. For writes to user space we have to additionally
 * check the PTE for write permission, because the 386 does not check
 * write permissions when we are executing with EPL 0. The 486 does check
 * this if the WP bit is set in CR0, so we can use a simpler version here.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */
281
/*
 * copyout(from_kernel, to_user, len)  - MP SAFE
 *
 * Copy len bytes from kernel space to user space.  Returns 0 on
 * success or EFAULT (via copyout_fault) if the destination range is
 * not entirely in user space or a fault occurs during the copy.
 */
ENTRY(copyout)
	movl	PCPU(CURPCB),%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi			/* %esi = from_kernel */
	movl	20(%esp),%edi			/* %edi = to_user */
	movl	24(%esp),%ebx			/* %ebx = len */
	testl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

	/* bcopy(%esi, %edi, %ebx) */
	movl	%ebx,%ecx

	shrl	$2,%ecx				/* copy by longwords ... */
	cld
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl				/* ... then the 0-3 trailing bytes */
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax			/* return 0 */
	movl	PCPU(CURPCB),%edx
	movl	%eax,PCB_ONFAULT(%edx)		/* disarm fault recovery */
	ret
END(copyout)
340
	ALIGN_TEXT
/*
 * Fault recovery for copyout: the trap handler jumps here via
 * pcb_onfault.  Unwind the registers copyout saved, disarm
 * pcb_onfault and return EFAULT.
 */
copyout_fault:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret
350
/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 *
 * Copy len bytes from user space to kernel space.  Returns 0 on
 * success or EFAULT (via copyin_fault) if the source range is not
 * entirely in user space or a fault occurs during the copy.
 */
ENTRY(copyin)
	movl	PCPU(CURPCB),%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid: reject wraparound and any source
	 * range extending past the end of user space
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault

	movb	%cl,%al				/* save len's low bits */
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax			/* return 0 */
	movl	PCPU(CURPCB),%edx
	movl	%eax,PCB_ONFAULT(%edx)		/* disarm fault recovery */
	ret
END(copyin)
389
	ALIGN_TEXT
/*
 * Fault recovery for copyin: reached via pcb_onfault from the trap
 * handler.  Unwind the saved registers, disarm pcb_onfault and
 * return EFAULT.
 */
copyin_fault:
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret
398
/*
 * casuword.  Compare and set user word.  Returns -1 or the current value.
 *
 * casuword(p, oldval, newval): if the user-space word *p equals oldval,
 * atomically replace it with newval.  Returns the value previously in
 * *p, or -1 (via fusufault) on an invalid address or fault.
 */

ALTENTRY(casuword32)
ENTRY(casuword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx			/* dst */
	movl	8(%esp),%eax			/* old */
	movl	12(%esp),%ecx			/* new */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

#ifdef SMP
	lock
#endif
	cmpxchgl %ecx,(%edx)			/* Compare and set. */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(casuword32)
END(casuword)
430
/*
 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
 * memory.  All these functions are MPSAFE.
 *
 * Each returns the fetched value, or -1 (via fusufault) on an invalid
 * address or fault.
 */

ALTENTRY(fuword32)
ENTRY(fuword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax			/* fetch the word */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(fuword32)
END(fuword)
450
/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16() intended to be callable from the profiling code at
 * interrupt time.  Until the trap code can cope with that, both are
 * stubs that unconditionally fail; callers treat the failure as a
 * harmless missed sample.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	xorl	%eax,%eax		/* build the -1 failure return */
	decl	%eax			/* (same idiom as fusufault) */
	ret
END(suswintr)
END(fuswintr)
464
/*
 * fuword16(addr): fetch a 16-bit word from user memory, zero-extended.
 * Returns -1 (via fusufault) on an invalid address or fault.
 */
ENTRY(fuword16)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address is valid */
	ja	fusufault

	movzwl	(%edx),%eax			/* zero-extended 16-bit fetch */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(fuword16)
477
/*
 * fubyte(addr): fetch an 8-bit byte from user memory, zero-extended.
 * Returns -1 (via fusufault) on an invalid address or fault.
 */
ENTRY(fubyte)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address is valid */
	ja	fusufault

	movzbl	(%edx),%eax			/* zero-extended byte fetch */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(fubyte)
490
	ALIGN_TEXT
/*
 * Common fault handler for the fetch/store/cas user-memory routines:
 * reached via pcb_onfault from the trap handler.  Disarm pcb_onfault
 * and return -1.
 */
fusufault:
	movl	PCPU(CURPCB),%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	decl	%eax				/* return (-1) */
	ret
498
/*
 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
 * All these functions are MPSAFE.
 *
 * Each returns 0 on success, or -1 (via fusufault) on an invalid
 * address or fault.
 */

ALTENTRY(suword32)
ENTRY(suword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax
	movl	%eax,(%edx)			/* store the word */
	xorl	%eax,%eax			/* return 0 */
	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(suword32)
END(suword)
521
/*
 * suword16(addr, v): store a 16-bit word to user memory.
 * Returns 0 on success, -1 (via fusufault) on failure.
 */
ENTRY(suword16)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax
	movw	%ax,(%edx)			/* store the 16-bit word */
	xorl	%eax,%eax			/* return 0 */
	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(suword16)
537
/*
 * subyte(addr, v): store an 8-bit byte to user memory.
 * Returns 0 on success, -1 (via fusufault) on failure.
 */
ENTRY(subyte)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al
	movb	%al,(%edx)			/* store the byte */
	xorl	%eax,%eax			/* return 0 */
	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
END(subyte)
553
/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *
 *	copy a string from 'from' (user space) to 'to' (kernel space),
 *	stopping when a 0 byte is reached.  Return ENAMETOOLONG if the
 *	string is longer than maxlen, and EFAULT on protection
 *	violations.  If lencopied is non-NULL, return the actual length
 *	(including the terminating 0) in *lencopied.
 */
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	PCPU(CURPCB),%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* arm fault recovery */

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */

	movl	$VM_MAXUSER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:
	incl	%edx				/* pre-bias the countdown */
	cld

2:
	decl	%edx
	jz	3f				/* out of budget */

	lodsb					/* copy one byte */
	stosb
	orb	%al,%al
	jnz	2b				/* not the terminating 0 yet */

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax			/* return 0 */
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	cpystrflt			/* truncated by the bounds clamp */
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	movl	20(%esp),%ecx
	subl	%edx,%ecx			/* %ecx = bytes copied */
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	1f				/* lencopied may be NULL */
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret
END(copyinstr)
626
/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *
 * Kernel-to-kernel variant of copyinstr: copy a NUL-terminated string
 * of at most maxlen bytes with no user-space bounds checking or fault
 * recovery.  Returns 0, or ENAMETOOLONG if no NUL was found within
 * maxlen bytes.  If lencopied is non-NULL, the number of bytes copied
 * (including the NUL) is stored there.
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx				/* pre-bias the countdown */
	cld
1:
	decl	%edx
	jz	4f				/* out of budget */
	lodsb					/* copy one byte */
	stosb
	orb	%al,%al
	jnz	1b				/* not the terminating 0 yet */

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax			/* return 0 */
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx			/* %ecx = bytes copied */
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f				/* lencopied may be NULL */
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret
END(copystr)
668
/*
 * bcmp(b1, b2, len)
 *
 * Compare len bytes.  Returns 0 if the regions are identical and 1
 * otherwise (unlike memcmp, the sign of the difference is not
 * reported).
 */
ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx

	movl	%edx,%ecx
	shrl	$2,%ecx				/* compare longword-wise first */
	cld					/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx				/* then the 0-3 trailing bytes */
	repe
	cmpsb
1:
	setne	%al				/* %al = 1 iff any miscompare */
	movsbl	%al,%eax
	popl	%esi
	popl	%edi
	ret
END(bcmp)
694
/*
 * Handling of special 386 registers and descriptor tables etc
 */
/*
 * void lgdt(struct region_descriptor *rdp);
 *
 * Load a new GDT and force every segment register to be reloaded from
 * it; %cs is reloaded by rewriting the return into a far (lret)
 * return.  Under XEN the hypervisor manages the GDT, so the lgdt
 * instruction itself is compiled out.
 */
ENTRY(lgdt)
#ifndef XEN
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)
#endif

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax			/* kernel data selector */
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%gs
	movl	%eax,%ss
	movl	$KPSEL,%eax			/* selector for %fs (PCPU access) */
	movl	%eax,%fs

	/* reload code selector by turning return into intersegmental return */
	movl	(%esp),%eax
	pushl	%eax
	movl	$KCSEL,4(%esp)
	MEXITCOUNT
	lret
END(lgdt)
726
/*
 * ssdtosd(*ssdp,*sdp)
 *
 * Pack a machine-independent software segment descriptor (ssd) into
 * the shuffled bit layout of a hardware i386 segment descriptor.
 * Pure bit rearrangement; NOTE(review): verify the field placement
 * against the descriptor definitions in <machine/segments.h>.
 */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx			/* %ecx = ssdp */
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx			/* %ecx = sdp */
	movl	%edx,(%ecx)			/* low word of descriptor */
	movl	%ebx,4(%ecx)			/* high word of descriptor */
	popl	%ebx
	ret
END(ssdtosd)
748
/*
 * void reset_dbregs()
 *
 * Clear all x86 debug registers: %dr7 first so every hardware
 * breakpoint is disabled before the address (%dr0-%dr3) and status
 * (%dr6) registers are cleared.
 */
ENTRY(reset_dbregs)
	movl    $0,%eax
	movl    %eax,%dr7     /* disable all breakpoints first */
	movl    %eax,%dr0
	movl    %eax,%dr1
	movl    %eax,%dr2
	movl    %eax,%dr3
	movl    %eax,%dr6
	ret
END(reset_dbregs)
760
/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

/*
 * int setjmp(jmp_buf buf)
 *
 * Kernel-internal setjmp: save the callee-saved registers, the stack
 * pointer and the return address into the six-word buffer, then
 * return 0.  A later longjmp on the same buffer returns 1 here.
 */
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)
777
/*
 * int longjmp(jmp_buf buf)
 *
 * Restore the context saved by setjmp on the same buffer and resume
 * execution at the saved return address, making that setjmp call
 * appear to return 1.
 */
ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
791
/*
 * Support for reading MSRs in the safe manner.
 *
 * Returns 0 and stores the 64-bit MSR value in *data, or EFAULT (via
 * msr_onfault) if the rdmsr instruction traps (e.g. a non-existent
 * MSR raises #GP, which the trap handler routes to pcb_onfault).
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movl	PCPU(CURPCB),%ecx
	movl	$msr_onfault,PCB_ONFAULT(%ecx)	/* arm fault recovery */

	movl	4(%esp),%ecx			/* MSR number */
	rdmsr					/* result in %edx:%eax */
	movl	8(%esp),%ecx
	movl	%eax,(%ecx)			/* low 32 bits */
	movl	%edx,4(%ecx)			/* high 32 bits */
	xorl	%eax,%eax			/* return 0 */

	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */

	ret
END(rdmsr_safe)
811
/*
 * Support for writing MSRs in the safe manner.
 *
 * Returns 0 on success, or EFAULT (via msr_onfault) if the wrmsr
 * instruction traps (e.g. a non-existent MSR or invalid value raises
 * #GP, which the trap handler routes to pcb_onfault).
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movl	PCPU(CURPCB),%ecx
	movl	$msr_onfault,PCB_ONFAULT(%ecx)	/* arm fault recovery */

	movl	4(%esp),%ecx			/* MSR number */
	movl	8(%esp),%eax			/* data, low 32 bits */
	movl	12(%esp),%edx			/* data, high 32 bits */
	wrmsr
	xorl	%eax,%eax			/* return 0 */

	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */

	ret
END(wrmsr_safe)
830
/*
 * MSR operations fault handler: reached via pcb_onfault when rdmsr or
 * wrmsr traps.  Disarm pcb_onfault and return EFAULT to the caller of
 * rdmsr_safe/wrmsr_safe.
 */
	ALIGN_TEXT
msr_onfault:
	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	$EFAULT,%eax
	ret