/* swtch.s -- FreeBSD revision 26267 */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: swtch.s,v 1.49 1997/04/28 08:39:48 fsmp Exp $
 */

#include "npx.h"			/* NNPX: number of FPU (npx) devices */
#include "opt_user_ldt.h"		/* USER_LDT option */
#include "opt_smp_privpages.h"		/* SMP_PRIVPAGES option */

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/spl.h>
#include <machine/smpasm.h>
#include <machine/smptests.h>		/** TEST_LOPRIO */

#if defined(SMP) && defined(SMP_PRIVPAGES)
#include <machine/pmap.h>
#endif

#include "assym.s"			/* generated struct offsets (P_*, PCB_*, TSS_*) */

/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/

/*
 * The following primitives manipulate the run queues.
 * _whichqs tells which of the 32 queues _qs
 * have processes in them.  setrunqueue puts processes into queues, Remrq
 * removes them from queues.  The running process is on no queue,
 * other processes are on a queue related to p->p_priority, divided by 4
 * actually to shrink the 0-127 range of priorities into the 32 available
 * queues.
 */
	.data
#ifndef SMP
	.globl	_curpcb
_curpcb:	.long	0			/* pointer to curproc's PCB area */
#endif
	.globl	_whichqs, _whichrtqs, _whichidqs

/* bit i set in one of these masks means queue i of that class is non-empty */
_whichqs:	.long	0			/* which run queues have data */
_whichrtqs:	.long	0			/* which realtime run queues have data */
_whichidqs:	.long	0			/* which idletime run queues have data */
	.globl	_hlt_vector
_hlt_vector:	.long	_default_halt		/* pointer to halt routine */


	.globl	_qs,_cnt,_panic

	.globl	_want_resched
_want_resched:	.long	0			/* we need to re-run the scheduler */
	.text
/*
 * setrunqueue(p)
 *
 * Link process p onto the tail of the run queue selected by its
 * scheduling class: realtime (_rtqs), idletime (_idqs), or normal
 * timeshare (_qs, indexed by p->p_priority / 4) -- and set the
 * matching bit in the corresponding _which*qs mask.
 *
 * Call should be made at spl6(), and p->p_stat should be SRUN
 *
 * In:    4(%esp) = p.  Clobbers %eax, %ecx, %edx, flags.
 */
ENTRY(setrunqueue)
	movl	4(%esp),%eax			/* %eax = p */
#ifdef DIAGNOSTIC
	cmpb	$SRUN,P_STAT(%eax)		/* panic unless p is runnable */
	je	set1
	pushl	$set2
	call	_panic				/* does not return */
set1:
#endif
	cmpw	$RTP_PRIO_NORMAL,P_RTPRIO_TYPE(%eax) /* normal priority process? */
	je	set_nort

	movzwl	P_RTPRIO_PRIO(%eax),%edx	/* queue index for rt/idle class */

	cmpw	$RTP_PRIO_REALTIME,P_RTPRIO_TYPE(%eax) /* realtime priority? */
	jne	set_id				/* must be idle priority */

set_rt:
	btsl	%edx,_whichrtqs			/* set q full bit */
	shll	$3,%edx				/* queue header is 8 bytes (2 ptrs) */
	addl	$_rtqs,%edx			/* locate q hdr */
	movl	%edx,P_FORW(%eax)		/* link process on tail of q */
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set_id:
	btsl	%edx,_whichidqs			/* set q full bit */
	shll	$3,%edx				/* queue header is 8 bytes (2 ptrs) */
	addl	$_idqs,%edx			/* locate q hdr */
	movl	%edx,P_FORW(%eax)		/* link process on tail of q */
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set_nort:                    			/*  Normal (RTOFF) code */
	movzbl	P_PRI(%eax),%edx
	shrl	$2,%edx				/* priority 0-127 -> queue 0-31 */
	btsl	%edx,_whichqs			/* set q full bit */
	shll	$3,%edx				/* queue header is 8 bytes (2 ptrs) */
	addl	$_qs,%edx			/* locate q hdr */
	movl	%edx,P_FORW(%eax)		/* link process on tail of q */
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set2:	.asciz	"setrunqueue"			/* panic message */

/*
 * Remrq(p)
 *
 * Unlink process p from whichever run queue (realtime, idletime, or
 * normal) its scheduling class places it on, clearing the queue's
 * _which*qs bit when the queue becomes empty.  Panics if the queue's
 * bit was already clear.
 *
 * Call should be made at spl6().
 *
 * In:    4(%esp) = p.  Clobbers %eax, %ecx, %edx, flags.
 */
ENTRY(remrq)
	movl	4(%esp),%eax			/* %eax = p */
	cmpw	$RTP_PRIO_NORMAL,P_RTPRIO_TYPE(%eax) /* normal priority process? */
	je	rem_nort

	movzwl	P_RTPRIO_PRIO(%eax),%edx	/* queue index for rt/idle class */

	cmpw	$RTP_PRIO_REALTIME,P_RTPRIO_TYPE(%eax) /* realtime priority? */
	jne	rem_id				/* no: idletime class */

	btrl	%edx,_whichrtqs			/* clear full bit, panic if clear already */
	jb	rem1rt				/* CF = previous bit value */
	pushl	$rem3rt
	call	_panic
rem1rt:
	pushl	%edx				/* save queue index across unlink */
	movl	P_FORW(%eax),%ecx		/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)		/* p->forw->back = p->back */
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)		/* p->back->forw = p->forw */
	popl	%edx
	movl	$_rtqs,%ecx
	shll	$3,%edx				/* queue header is 8 bytes */
	addl	%edx,%ecx			/* %ecx = q hdr */
	cmpl	P_FORW(%ecx),%ecx		/* q still has something? */
	je	rem2rt
	shrl	$3,%edx				/* yes, set bit as still full */
	btsl	%edx,_whichrtqs
rem2rt:
	ret
rem_id:
	btrl	%edx,_whichidqs			/* clear full bit, panic if clear already */
	jb	rem1id				/* CF = previous bit value */
	pushl	$rem3id
	call	_panic
rem1id:
	pushl	%edx				/* save queue index across unlink */
	movl	P_FORW(%eax),%ecx		/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)		/* p->forw->back = p->back */
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)		/* p->back->forw = p->forw */
	popl	%edx
	movl	$_idqs,%ecx
	shll	$3,%edx				/* queue header is 8 bytes */
	addl	%edx,%ecx			/* %ecx = q hdr */
	cmpl	P_FORW(%ecx),%ecx		/* q still has something? */
	je	rem2id
	shrl	$3,%edx				/* yes, set bit as still full */
	btsl	%edx,_whichidqs
rem2id:
	ret

rem_nort:
	movzbl	P_PRI(%eax),%edx
	shrl	$2,%edx				/* priority 0-127 -> queue 0-31 */
	btrl	%edx,_whichqs			/* clear full bit, panic if clear already */
	jb	rem1				/* CF = previous bit value */
	pushl	$rem3
	call	_panic
rem1:
	pushl	%edx				/* save queue index across unlink */
	movl	P_FORW(%eax),%ecx		/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)		/* p->forw->back = p->back */
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)		/* p->back->forw = p->forw */
	popl	%edx
	movl	$_qs,%ecx
	shll	$3,%edx				/* queue header is 8 bytes */
	addl	%edx,%ecx			/* %ecx = q hdr */
	cmpl	P_FORW(%ecx),%ecx		/* q still has something? */
	je	rem2
	shrl	$3,%edx				/* yes, set bit as still full */
	btsl	%edx,_whichqs
rem2:
	ret

/* panic messages */
rem3:	.asciz	"remrq"
rem3rt:	.asciz	"remrq.rt"
rem3id:	.asciz	"remrq.id"

/*
 * When no processes are on the runq, cpu_switch() branches to _idle
 * to wait for something to come ready.
 *
 * Runs on the private tmpstk in the IdlePTD address space, polling
 * the three run-queue masks with interrupts disabled and halting
 * (via *_hlt_vector) when nothing is runnable.
 *
 * NOTE: on an SMP system this routine is a startup-only code path.
 * once initialization is over, meaning the idle procs have been
 * created, we should NEVER branch here.
 */
	ALIGN_TEXT
_idle:
#ifdef SMP
	movl	_smp_active, %eax
	cmpl	$0, %eax
	jnz	badsw				/* SMP running: must never idle here */
#endif /* SMP */
	xorl	%ebp,%ebp
	movl	$HIDENAME(tmpstk),%esp		/* run on the private temp stack */
	movl	_IdlePTD,%ecx
	movl	%ecx,%cr3			/* switch to the idle address space */

	/* update common_tss.tss_esp0 pointer */
#ifdef SMP
	GETCPUID(%eax)
	movl	_SMPcommon_tss_ptr(,%eax,4), %eax	/* this cpu's tss */
#else
	movl	$_common_tss, %eax
#endif
	movl	%esp, TSS_ESP0(%eax)

#ifdef TSS_IS_CACHED				/* example only */
	/* Reload task register to force reload of selector */
	movl	_tssptr, %ebx
	andb	$~0x02, 5(%ebx)			/* Flip 386BSY -> 386TSS */
	movl	_gsel_tss, %ebx
	ltr	%bx
#endif

	sti

	/*
	 * XXX callers of cpu_switch() do a bogus splclock().  Locking should
	 * be left to cpu_switch().
	 */
	movl	$SWI_AST_MASK,_cpl
	testl	$~SWI_AST_MASK,_ipending
	je	idle_loop
	call	_splz				/* service pending ints first */

	ALIGN_TEXT
idle_loop:
	cli
	movb	$1,_intr_nesting_level		/* charge Intr if we leave */
	cmpl	$0,_whichrtqs			/* real-time queue */
	CROSSJUMP(jne, sw1a, je)
	cmpl	$0,_whichqs			/* normal queue */
	CROSSJUMP(jne, nortqr, je)
	cmpl	$0,_whichidqs			/* 'idle' queue */
	CROSSJUMP(jne, idqr, je)
	movb	$0,_intr_nesting_level		/* charge Idle for this loop */
	call	_vm_page_zero_idle		/* useful work while idle */
	testl	%eax, %eax
	jnz	idle_loop			/* did work; recheck queues */
	sti
	call	*_hlt_vector			/* wait for interrupt */
	jmp	idle_loop

CROSSJUMPTARGET(_idle)

/*
 * default_halt:  stop the CPU until the next interrupt.
 * Default target of *_hlt_vector, called from the idle loop.
 */
ENTRY(default_halt)
	hlt
	ret

/*
 * cpu_switch()
 *
 * The context switcher.  If there is a current process, save its
 * hardware context (%eip/%ebx/%esp/%ebp/%esi/%edi and, when needed,
 * FPU state) into its PCB; then pick the next process from the
 * realtime, normal, or idletime run queues (in that priority order),
 * switch to its address space (%cr3), update the TSS kernel stack
 * pointer, restore its registers, and return into it.  Branches to
 * _idle if no process is runnable.
 */
ENTRY(cpu_switch)

	/* switch to new process. first, save context as needed */
	GETCURPROC(%ecx)

	/* if no process to save, don't bother */
	testl	%ecx,%ecx
	je	sw1

#ifdef SMP
	movb	P_ONCPU(%ecx), %al		/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)		/* "leave" the cpu */
#endif

	movl	P_ADDR(%ecx),%ecx		/* %ecx = outgoing process's pcb */

	movl	(%esp),%eax			/* Hardware registers */
	movl	%eax,PCB_EIP(%ecx)		/* our return address = resume pt */
	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)

#ifdef SMP
	movl	_mp_lock, %eax
	cmpl	$0xffffffff, %eax		/* is it free? */
	je	badsw				/* yes, bad medicine! */
	andl	$0x00ffffff, %eax		/* clear CPU portion */
	movl	%eax,PCB_MPNEST(%ecx)		/* store it (nesting count only) */
#endif /* SMP */

#if NNPX > 0
	/* have we used fp, and need a save? */
	GETCURPROC(%eax)
	GETNPXPROC(%ebx)
	cmp	%eax,%ebx			/* only if npxproc == curproc */
	jne	1f
	addl	$PCB_SAVEFPU,%ecx		/* h/w bugs make saving complicated */
	pushl	%ecx
	call	_npxsave			/* do it in a big C function */
	popl	%eax				/* discard arg; %ecx now stale */
1:
#endif	/* NNPX > 0 */

	movb	$1,_intr_nesting_level		/* charge Intr, not Sys/Idle */

	SETCURPROC($0, %edi)			/* no current process while switching */

	/* save is done, now choose a new process or idle */
sw1:
	cli
sw1a:
	movl    _whichrtqs,%edi			/* pick next p. from rtqs */
	testl	%edi,%edi
	jz	nortqr				/* no realtime procs */

	/* XXX - bsf is sloow */
	bsfl	%edi,%ebx			/* find a full q */
	jz	nortqr				/* no proc on rt q - try normal ... */

	/* XX update whichqs? */
	btrl	%ebx,%edi			/* clear q full status */
	leal	_rtqs(,%ebx,8),%eax		/* select q */
	movl	%eax,%esi			/* remember q hdr for empty test */

	movl	P_FORW(%eax),%ecx		/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi		/* q empty */
	je	rt3
	btsl	%ebx,%edi			/* nope, set to indicate not empty */
rt3:
	movl	%edi,_whichrtqs			/* update q status */
	jmp	swtch_com

	/* old sw1a */
/* Normal process priority's */
nortqr:
	movl	_whichqs,%edi
2:
	/* XXX - bsf is sloow */
	bsfl	%edi,%ebx			/* find a full q */
	jz	idqr				/* if none, idle */

	/* XX update whichqs? */
	btrl	%ebx,%edi			/* clear q full status */
	leal	_qs(,%ebx,8),%eax		/* select q */
	movl	%eax,%esi			/* remember q hdr for empty test */

	movl	P_FORW(%eax),%ecx		/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi		/* q empty */
	je	3f
	btsl	%ebx,%edi			/* nope, set to indicate not empty */
3:
	movl	%edi,_whichqs			/* update q status */
	jmp	swtch_com

idqr: /* was sw1a */
	movl    _whichidqs,%edi			/* pick next p. from idqs */

	/* XXX - bsf is sloow */
	bsfl	%edi,%ebx			/* find a full q */
	CROSSJUMP(je, _idle, jne)		/* if no proc, idle */

	/* XX update whichqs? */
	btrl	%ebx,%edi			/* clear q full status */
	leal	_idqs(,%ebx,8),%eax		/* select q */
	movl	%eax,%esi			/* remember q hdr for empty test */

	movl	P_FORW(%eax),%ecx		/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi		/* q empty */
	je	id3
	btsl	%ebx,%edi			/* nope, set to indicate not empty */
id3:
	movl	%edi,_whichidqs			/* update q status */

swtch_com:					/* %ecx = next process to run */
	movl	$0,%eax
	movl	%eax,_want_resched		/* we are rescheduling right now */

#ifdef	DIAGNOSTIC
	cmpl	%eax,P_WCHAN(%ecx)		/* must not be asleep */
	jne	badsw
	cmpb	$SRUN,P_STAT(%ecx)		/* must be runnable */
	jne	badsw
#endif

	movl	%eax,P_BACK(%ecx) 		/* isolate process to run */
	movl	P_ADDR(%ecx),%edx		/* %edx = new pcb */
	movl	PCB_CR3(%edx),%ebx		/* %ebx = new page directory */

#if defined(SMP) && defined(SMP_PRIVPAGES)
	/* Grab the private PT pointer from the outgoing process's PTD */
	movl	$_PTD,%esi
	movl	4*MPPTDI(%esi), %eax		/* fetch cpu's prv pt */
#endif

	/* switch address space */
	movl	%ebx,%cr3

#if defined(SMP) && defined(SMP_PRIVPAGES)
	/* Copy the private PT to the new process's PTD */
	/* XXX yuck, the _PTD changes when we switch, so we have to
	 * reload %cr3 after changing the address space.
	 * We need to fix this by storing a pointer to the virtual
	 * location of the per-process PTD in the PCB or something quick.
	 * Dereferencing proc->vm_map->pmap->p_pdir[] is painful in asm.
	 */
	movl	$_PTD,%esi
	movl	%eax, 4*MPPTDI(%esi)		/* restore cpu's prv page */

	/* XXX: we have just changed the page tables.. reload.. */
	movl	%ebx,%cr3
#endif

#ifdef HOW_TO_SWITCH_TSS			/* example only */
	/* Fix up tss pointer to floating pcb/stack structure */
	/* XXX probably lots faster to store the 64 bits of tss entry
	 * in the pcb somewhere and copy them on activation.
	 */
	movl	_tssptr, %ebx
	movl	%edx, %eax			/* edx = pcb/tss */
	movw	%ax, 2(%ebx)			/* store bits 0->15 */
	roll	$16, %eax			/* swap upper and lower */
	movb	%al, 4(%ebx)			/* store bits 16->23 */
	movb	%ah, 7(%ebx)			/* store bits 24->31 */
	andb	$~0x02, 5(%ebx)			/* Flip 386BSY -> 386TSS */
#endif

	/* update common_tss.tss_esp0 pointer */
#ifdef SMP
	GETCPUID(%eax)
	movl	_SMPcommon_tss_ptr(,%eax,4), %eax	/* this cpu's tss */
#else
	movl	$_common_tss, %eax
#endif
	movl	%edx, %ebx			/* pcb */
	addl	$(UPAGES * PAGE_SIZE), %ebx	/* top of the U area = kstack top */
	movl	%ebx, TSS_ESP0(%eax)

#ifdef TSS_IS_CACHED				/* example only */
	/* Reload task register to force reload of selector */
	movl	_tssptr, %ebx
	andb	$~0x02, 5(%ebx)			/* Flip 386BSY -> 386TSS */
	movl	_gsel_tss, %ebx
	ltr	%bx
#endif

	/* restore context */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)			/* ret below returns into new proc */

#ifdef SMP
	GETCPUID(%eax)
	movb	%al, P_ONCPU(%ecx)		/* claim the cpu */
#endif
	SETCURPCB(%edx, %eax)
	SETCURPROC(%ecx, %eax)

	movb	$0,_intr_nesting_level
#ifdef SMP
	movl	_apic_base, %eax		/* base addr of LOCAL APIC */
#if defined(TEST_LOPRIO)
	pushl	%edx
	movl	APIC_TPR(%eax), %edx		/* get TPR register contents */
	andl	$~0xff, %edx			/* clear the prio field */
	movl	%edx, APIC_TPR(%eax)		/* now hold loprio for INTs */
	popl	%edx
#endif /* TEST_LOPRIO */
	movl	APIC_ID(%eax), %eax		/* APIC ID register */
	andl	$APIC_ID_MASK, %eax		/* extract ID portion */
	orl	PCB_MPNEST(%edx), %eax		/* add count from PROC */
	movl	%eax, _mp_lock			/* load the mp_lock */
#endif /* SMP */

#ifdef	USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)		/* process has a private LDT? */
	jnz	1f
	movl	__default_ldt,%eax
	cmpl	_currentldt,%eax		/* default already loaded? */
	je	2f
	lldt	__default_ldt
	movl	%eax,_currentldt
	jmp	2f
1:	pushl	%edx
	call	_set_user_ldt			/* install per-process LDT */
	popl	%edx
2:
#endif

	sti
	ret

CROSSJUMPTARGET(idqr)
CROSSJUMPTARGET(nortqr)
CROSSJUMPTARGET(sw1a)

/* fatal inconsistency detected above: panic */
badsw:
	pushl	$sw0
	call	_panic				/* does not return */

sw0:	.asciz	"cpu_switch"			/* panic message */

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 *
 * Stores the caller's return address as pcb_eip plus the callee-saved
 * registers and stack pointer into the given pcb; if the FPU is owned
 * (npxproc != NULL) its state is saved to npxproc's pcb and copied
 * into the target pcb as well.
 *
 * In:    4(%esp) = pcb.  Clobbers %eax, %ecx, flags.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)

#if NNPX > 0
	/*
	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxproc != NULL, then we have to save the npx h/w state to
	 * npxproc's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	GETNPXPROC(%eax)
	testl	%eax,%eax
	je	1f				/* FPU not owned: nothing to do */

	pushl	%ecx				/* save target pcb pointer */
	movl	P_ADDR(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax		/* %eax = npxproc's FPU save area */
	pushl	%eax				/* copy kept for the bcopy below */
	pushl	%eax				/* arg to npxsave */
	call	_npxsave
	addl	$4,%esp				/* drop npxsave's arg */
	popl	%eax				/* recover FPU save area ptr */
	popl	%ecx				/* recover target pcb */

	pushl	$PCB_SAVEFPU_SIZE		/* bcopy(src, dst, len) */
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	_bcopy
	addl	$12,%esp
#endif	/* NNPX > 0 */

1:
	ret
633