/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: swtch.s,v 1.78 1999/04/02 17:59:39 alc Exp $
 */

#include "npx.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>

#ifdef SMP
#include <machine/pmap.h>
#include <machine/apic.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/ipl.h>
#include <machine/lock.h>
#endif /* SMP */

#include "assym.s"


/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/

/*
 * The following primitives manipulate the run queues.  _whichqs tells
 * which of the 32 queues _qs have processes in them.  setrunqueue puts
 * processes into queues, remrq removes them from queues.  The running
 * process is on no queue; other processes are on the queue indexed by
 * p->p_priority / 4, which compresses the 0-127 priority range into the
 * 32 available queues.
 */
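/*
 * In rough C, the data below is (a sketch; each queue header is a dummy
 * proc holding only the two links, which is why the asm scales a queue
 * index by 8 to reach its header):
 *
 *	struct prochd {
 *		struct proc *ph_link;		head of this queue (p_forw)
 *		struct proc *ph_rlink;		tail of this queue (p_back)
 *	} qs[32], rtqs[32], idqs[32];
 *
 *	u_int32_t whichqs;		bit i set <=> qs[i] is non-empty
 *	u_int32_t whichrtqs;		likewise for rtqs
 *	u_int32_t whichidqs;		likewise for idqs
 */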
	.data

	.globl	_whichqs, _whichrtqs, _whichidqs

_whichqs:	.long	0		/* which run queues have data */
_whichrtqs:	.long	0		/* which realtime run qs have data */
_whichidqs:	.long	0		/* which idletime run qs have data */

	.globl	_hlt_vector
_hlt_vector:	.long	_default_halt	/* pointer to halt routine */

	.globl	_qs,_cnt,_panic

	.globl	_want_resched
_want_resched:	.long	0		/* we need to re-run the scheduler */
#if defined(SWTCH_OPTIM_STATS)
	.globl	_swtch_optim_stats, _tlb_flush_count
_swtch_optim_stats:	.long	0		/* number of _swtch_optims */
_tlb_flush_count:	.long	0
#endif

	.text
/*
 * setrunqueue(p)
 *
 * Call should be made at spl6(), and p->p_stat should be SRUN.
 */
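/*
 * The queue linkage, in rough C (a sketch; the header doubles as a proc,
 * so the links form a circular doubly-linked list):
 *
 *	i = p->p_priority >> 2;			(normal case)
 *	whichqs |= 1 << i;
 *	q = (struct proc *)&qs[i];
 *	p->p_forw = q;				link p on the tail of q
 *	p->p_back = q->p_back;
 *	q->p_back->p_forw = p;
 *	q->p_back = p;
 */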
ENTRY(setrunqueue)
	movl	4(%esp),%eax
#ifdef DIAGNOSTIC
	cmpb	$SRUN,P_STAT(%eax)
	je	set1
	pushl	$set2
	call	_panic
set1:
#endif
	cmpw	$RTP_PRIO_NORMAL,P_RTPRIO_TYPE(%eax) /* normal priority process? */
	je	set_nort

	movzwl	P_RTPRIO_PRIO(%eax),%edx

	cmpw	$RTP_PRIO_REALTIME,P_RTPRIO_TYPE(%eax) /* RR realtime priority? */
	je	set_rt				/* RT priority */
	cmpw	$RTP_PRIO_FIFO,P_RTPRIO_TYPE(%eax) /* FIFO realtime priority? */
	jne	set_id				/* must be idle priority */

set_rt:
	btsl	%edx,_whichrtqs			/* set q full bit */
	shll	$3,%edx
	addl	$_rtqs,%edx			/* locate q hdr */
	movl	%edx,P_FORW(%eax)		/* link process on tail of q */
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set_id:
	btsl	%edx,_whichidqs			/* set q full bit */
	shll	$3,%edx
	addl	$_idqs,%edx			/* locate q hdr */
	movl	%edx,P_FORW(%eax)		/* link process on tail of q */
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set_nort:				/* Normal (RTOFF) code */
	movzbl	P_PRI(%eax),%edx
	shrl	$2,%edx
	btsl	%edx,_whichqs			/* set q full bit */
	shll	$3,%edx
	addl	$_qs,%edx			/* locate q hdr */
	movl	%edx,P_FORW(%eax)		/* link process on tail of q */
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set2:	.asciz	"setrunqueue"

/*
 * remrq(p)
 *
 * Call should be made at spl6().
 */
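/*
 * The unlink, in rough C (a sketch; i is the queue index computed as in
 * setrunqueue above):
 *
 *	if ((whichqs & (1 << i)) == 0)
 *		panic("remrq");
 *	whichqs &= ~(1 << i);
 *	p->p_forw->p_back = p->p_back;		unlink p
 *	p->p_back->p_forw = p->p_forw;
 *	if (qs[i].ph_link != (struct proc *)&qs[i])
 *		whichqs |= 1 << i;		queue is still non-empty
 */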
ENTRY(remrq)
	movl	4(%esp),%eax
	cmpw	$RTP_PRIO_NORMAL,P_RTPRIO_TYPE(%eax) /* normal priority process? */
	je	rem_nort

	movzwl	P_RTPRIO_PRIO(%eax),%edx

	cmpw	$RTP_PRIO_REALTIME,P_RTPRIO_TYPE(%eax) /* realtime priority process? */
	je	rem0rt
	cmpw	$RTP_PRIO_FIFO,P_RTPRIO_TYPE(%eax) /* FIFO realtime priority process? */
	jne	rem_id

rem0rt:
	btrl	%edx,_whichrtqs			/* clear full bit, panic if clear already */
	jb	rem1rt
	pushl	$rem3rt
	call	_panic
rem1rt:
	pushl	%edx
	movl	P_FORW(%eax),%ecx		/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)
	popl	%edx
	movl	$_rtqs,%ecx
	shll	$3,%edx
	addl	%edx,%ecx
	cmpl	P_FORW(%ecx),%ecx		/* q still has something? */
	je	rem2rt
	shrl	$3,%edx				/* yes, set bit as still full */
	btsl	%edx,_whichrtqs
rem2rt:
	ret
rem_id:
	btrl	%edx,_whichidqs			/* clear full bit, panic if clear already */
	jb	rem1id
	pushl	$rem3id
	call	_panic
rem1id:
	pushl	%edx
	movl	P_FORW(%eax),%ecx		/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)
	popl	%edx
	movl	$_idqs,%ecx
	shll	$3,%edx
	addl	%edx,%ecx
	cmpl	P_FORW(%ecx),%ecx		/* q still has something? */
	je	rem2id
	shrl	$3,%edx				/* yes, set bit as still full */
	btsl	%edx,_whichidqs
rem2id:
	ret

rem_nort:
	movzbl	P_PRI(%eax),%edx
	shrl	$2,%edx
	btrl	%edx,_whichqs			/* clear full bit, panic if clear already */
	jb	rem1
	pushl	$rem3
	call	_panic
rem1:
	pushl	%edx
	movl	P_FORW(%eax),%ecx		/* unlink process */
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)
	popl	%edx
	movl	$_qs,%ecx
	shll	$3,%edx
	addl	%edx,%ecx
	cmpl	P_FORW(%ecx),%ecx		/* q still has something? */
	je	rem2
	shrl	$3,%edx				/* yes, set bit as still full */
	btsl	%edx,_whichqs
rem2:
	ret

rem3:	.asciz	"remrq"
rem3rt:	.asciz	"remrq.rt"
rem3id:	.asciz	"remrq.id"

/*
 * When no processes are on the runq, cpu_switch() branches to _idle
 * to wait for something to come ready.
 */
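/*
 * The uniprocessor flavour is, in rough C (a sketch; the SMP flavour
 * additionally juggles the mp_lock and the local APIC task priority):
 *
 *	for (;;) {
 *		disable_intr();
 *		if (whichrtqs || whichqs || whichidqs)
 *			goto pick_a_process;		sw1a and friends
 *		if (vm_page_zero_idle() != 0)
 *			continue;
 *		enable_intr();
 *		(*hlt_vector)();	normally hlt; wait for an interrupt
 *	}
 */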
	ALIGN_TEXT
_idle:
	xorl	%ebp,%ebp
	movl	%ebp,_switchtime

#ifdef SMP

	/* when called, we have the mplock, intr disabled */
	/* use our idleproc's "context" */
	movl	_IdlePTD, %ecx
	movl	%cr3, %eax
	cmpl	%ecx, %eax
	je	2f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	%ecx, %cr3
2:
	/* Keep space for a nonexistent return addr, or profiling bombs */
	movl	$gd_idlestack_top-4, %ecx
	addl	%fs:0, %ecx
	movl	%ecx, %esp

	/* update common_tss.tss_esp0 pointer */
	movl	%ecx, _common_tss + TSS_ESP0

#ifdef VM86
	movl	_cpuid, %esi
	btrl	%esi, _private_tss
	jae	1f

	movl	$GPROC0_SEL, %esi
	movl	$gd_common_tssd, %edi
	addl	%fs:0, %edi

	/* move correct tss descriptor into GDT slot, then reload tr */
	leal	_gdt(,%esi,8), %ebx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ebx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)
	shll	$3, %esi			/* GSEL(entry, SEL_KPL) */
	ltr	%si
1:
#endif /* VM86 */

	sti

	/*
	 * XXX callers of cpu_switch() do a bogus splclock().  Locking should
	 * be left to cpu_switch().
	 */
	call	_spl0

	cli

	/*
	 * _REALLY_ free the lock, no matter how deep the prior nesting.
	 * We will recover the nesting on the way out when we have a new
	 * proc to load.
	 *
	 * XXX: we had damn well better be sure we had it before doing this!
	 */
	CPL_LOCK				/* XXX */
	andl	$~SWI_AST_MASK, _ipending	/* XXX */
	movl	$0, _cpl		/* XXX Allow ASTs on other CPU */
	CPL_UNLOCK				/* XXX */
	movl	$FREE_LOCK, %eax
	movl	%eax, _mp_lock

	/* do NOT have lock, intrs disabled */
	.globl	idle_loop
idle_loop:

	cmpl	$0,_smp_active
	jne	1f
	cmpl	$0,_cpuid
	je	1f
	jmp	2f

1:	cmpl	$0,_whichrtqs			/* real-time queue */
	jne	3f
	cmpl	$0,_whichqs			/* normal queue */
	jne	3f
	cmpl	$0,_whichidqs			/* 'idle' queue */
	jne	3f

	cmpl	$0,_do_page_zero_idle
	je	2f

	/* XXX appears to cause panics */
	/*
	 * Inside zero_idle we enable interrupts and grab the mplock
	 * as needed.  It needs to be careful about entry/exit mutexes.
	 */
	call	_vm_page_zero_idle		/* internal locking */
	testl	%eax, %eax
	jnz	idle_loop
2:

	/* enable intrs for a halt */
	movl	$0, lapic_tpr			/* 1st candidate for an INT */
	sti
	call	*_hlt_vector			/* wait for interrupt */
	cli
	jmp	idle_loop

3:
	movl	$LOPRIO_LEVEL, lapic_tpr	/* arbitrate for INTs */
	call	_get_mplock
	CPL_LOCK				/* XXX */
	movl	$SWI_AST_MASK, _cpl	/* XXX Disallow ASTs on other CPU */
	CPL_UNLOCK				/* XXX */
	cmpl	$0,_whichrtqs			/* real-time queue */
	CROSSJUMP(jne, sw1a, je)
	cmpl	$0,_whichqs			/* normal queue */
	CROSSJUMP(jne, nortqr, je)
	cmpl	$0,_whichidqs			/* 'idle' queue */
	CROSSJUMP(jne, idqr, je)
	CPL_LOCK				/* XXX */
	movl	$0, _cpl		/* XXX Allow ASTs on other CPU */
	CPL_UNLOCK				/* XXX */
	call	_rel_mplock
	jmp	idle_loop

#else /* !SMP */

	movl	$HIDENAME(tmpstk),%esp
#if defined(OVERLY_CONSERVATIVE_PTD_MGMT)
#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	movl	_IdlePTD, %ecx
	movl	%cr3, %eax
	cmpl	%ecx, %eax
	je	2f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	%ecx, %cr3
2:
#endif

	/* update common_tss.tss_esp0 pointer */
	movl	%esp, _common_tss + TSS_ESP0

#ifdef VM86
	movl	$0, %esi
	btrl	%esi, _private_tss
	jae	1f

	movl	$GPROC0_SEL, %esi
	movl	$_common_tssd, %edi

	/* move correct tss descriptor into GDT slot, then reload tr */
	leal	_gdt(,%esi,8), %ebx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ebx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)
	shll	$3, %esi			/* GSEL(entry, SEL_KPL) */
	ltr	%si
1:
#endif /* VM86 */

	sti

	/*
	 * XXX callers of cpu_switch() do a bogus splclock().  Locking should
	 * be left to cpu_switch().
	 */
	call	_spl0

	ALIGN_TEXT
idle_loop:
	cli
	cmpl	$0,_whichrtqs			/* real-time queue */
	CROSSJUMP(jne, sw1a, je)
	cmpl	$0,_whichqs			/* normal queue */
	CROSSJUMP(jne, nortqr, je)
	cmpl	$0,_whichidqs			/* 'idle' queue */
	CROSSJUMP(jne, idqr, je)
	call	_vm_page_zero_idle
	testl	%eax, %eax
	jnz	idle_loop
	sti
	call	*_hlt_vector			/* wait for interrupt */
	jmp	idle_loop

#endif /* SMP */

CROSSJUMPTARGET(_idle)

ENTRY(default_halt)
#ifndef SMP
	hlt					/* XXX: until a wakeup IPI */
#endif
	ret

/*
 * cpu_switch()
 */
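/*
 * In outline, what follows is (a sketch, not compiled code):
 *
 *	if (curproc != NULL) {
 *		save %eip, %esp, callee-saved regs and %gs in curproc's pcb;
 *		clear our bit in the old vmspace's pm_active;
 *		stash the mp_lock nesting count (SMP) and the FPU state
 *		    (if npxproc == curproc) in the pcb;
 *		curproc = NULL;
 *	}
 *	p = first proc on the highest-priority non-empty rt/normal/idle
 *	    queue, else branch to _idle;
 *	reload %cr3 if it changed; fix up the TSS and LDT if needed;
 *	set our bit in the new vmspace's pm_active;
 *	curproc = p; restore p's registers from its pcb and return into p.
 */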
ENTRY(cpu_switch)

	/* switch to new process. first, save context as needed */
	movl	_curproc,%ecx

	/* if no process to save, don't bother */
	testl	%ecx,%ecx
	je	sw1

#ifdef SMP
	movb	P_ONCPU(%ecx), %al		/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)		/* "leave" the cpu */
#endif /* SMP */
	movl	P_VMSPACE(%ecx), %edx
#ifdef SMP
	movl	_cpuid, %eax
#else
	xorl	%eax, %eax
#endif /* SMP */
	btrl	%eax, VM_PMAP+PM_ACTIVE(%edx)

	movl	P_ADDR(%ecx),%edx

	movl	(%esp),%eax			/* Hardware registers */
	movl	%eax,PCB_EIP(%edx)
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)

#ifdef SMP
	movl	_mp_lock, %eax
	/* XXX FIXME: we should be saving the local APIC TPR */
#ifdef DIAGNOSTIC
	cmpl	$FREE_LOCK, %eax		/* is it free? */
	je	badsw4				/* yes, bad medicine! */
#endif /* DIAGNOSTIC */
	andl	$COUNT_FIELD, %eax		/* clear CPU portion */
	movl	%eax, PCB_MPNEST(%edx)		/* store it */
#endif /* SMP */

#if NNPX > 0
	/* have we used fp, and need a save? */
	cmpl	%ecx,_npxproc
	jne	1f
	addl	$PCB_SAVEFPU,%edx		/* h/w bugs make saving complicated */
	pushl	%edx
	call	_npxsave			/* do it in a big C function */
	popl	%eax
1:
#endif	/* NNPX > 0 */

	movl	$0,_curproc			/* out of process */

	/* save is done, now choose a new process or idle */
sw1:
	cli

#ifdef SMP
	/* Stop scheduling if smp_active goes zero and we are not BSP */
	cmpl	$0,_smp_active
	jne	1f
	cmpl	$0,_cpuid
	je	1f
	CROSSJUMP(je, _idle, jne)		/* wind down */
1:
#endif

sw1a:
	movl	_whichrtqs,%edi			/* pick next p. from rtqs */
	testl	%edi,%edi
	jz	nortqr				/* no realtime procs */

	/* XXX - bsf is slow */
	bsfl	%edi,%ebx			/* find a full q */
	jz	nortqr				/* no proc on rt q - try normal ... */

	/* XXX update whichqs? */
	btrl	%ebx,%edi			/* clear q full status */
	leal	_rtqs(,%ebx,8),%eax		/* select q */
	movl	%eax,%esi

	movl	P_FORW(%eax),%ecx		/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi		/* q empty */
	je	rt3
	btsl	%ebx,%edi			/* nope, set to indicate not empty */
rt3:
	movl	%edi,_whichrtqs			/* update q status */
	jmp	swtch_com

	/* old sw1a */
/* Normal process priorities */
nortqr:
	movl	_whichqs,%edi
2:
	/* XXX - bsf is slow */
	bsfl	%edi,%ebx			/* find a full q */
	jz	idqr				/* if none, idle */

	/* XXX update whichqs? */
	btrl	%ebx,%edi			/* clear q full status */
	leal	_qs(,%ebx,8),%eax		/* select q */
	movl	%eax,%esi

	movl	P_FORW(%eax),%ecx		/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi		/* q empty */
	je	3f
	btsl	%ebx,%edi			/* nope, set to indicate not empty */
3:
	movl	%edi,_whichqs			/* update q status */
	jmp	swtch_com

idqr: /* was sw1a */
	movl	_whichidqs,%edi			/* pick next p. from idqs */

	/* XXX - bsf is slow */
	bsfl	%edi,%ebx			/* find a full q */
	CROSSJUMP(je, _idle, jne)		/* if no proc, idle */

	/* XXX update whichqs? */
	btrl	%ebx,%edi			/* clear q full status */
	leal	_idqs(,%ebx,8),%eax		/* select q */
	movl	%eax,%esi

	movl	P_FORW(%eax),%ecx		/* unlink from front of process q */
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi		/* q empty */
	je	id3
	btsl	%ebx,%edi			/* nope, set to indicate not empty */
id3:
	movl	%edi,_whichidqs			/* update q status */

swtch_com:
	movl	$0,%eax
	movl	%eax,_want_resched

#ifdef	DIAGNOSTIC
	cmpl	%eax,P_WCHAN(%ecx)
	jne	badsw1
	cmpb	$SRUN,P_STAT(%ecx)
	jne	badsw2
#endif

	movl	%eax,P_BACK(%ecx) 		/* isolate process to run */
	movl	P_ADDR(%ecx),%edx

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/* switch address space */
	movl	%cr3,%ebx
	cmpl	PCB_CR3(%edx),%ebx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	PCB_CR3(%edx),%ebx
	movl	%ebx,%cr3
4:
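	/*
	 * I.e., roughly (a sketch):
	 *
	 *	if (rcr3() != pcb->pcb_cr3)
	 *		load_cr3(pcb->pcb_cr3);		flushes the TLB
	 *
	 * Skipping the reload when %cr3 already matches avoids a needless
	 * TLB flush.
	 */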

#ifdef VM86
#ifdef SMP
	movl	_cpuid, %esi
#else
	xorl	%esi, %esi
#endif
	cmpl	$0, PCB_EXT(%edx)		/* has pcb extension? */
	je	1f
	btsl	%esi, _private_tss		/* mark use of private tss */
	movl	PCB_EXT(%edx), %edi		/* new tss descriptor */
	jmp	2f
1:
#endif

	/* update common_tss.tss_esp0 pointer */
	movl	%edx, %ebx			/* pcb */
#ifdef VM86
	addl	$(UPAGES * PAGE_SIZE - 16), %ebx
#else
	addl	$(UPAGES * PAGE_SIZE), %ebx
#endif /* VM86 */
	movl	%ebx, _common_tss + TSS_ESP0

#ifdef VM86
	btrl	%esi, _private_tss
	jae	3f
#ifdef SMP
	movl	$gd_common_tssd, %edi
	addl	%fs:0, %edi
#else
	movl	$_common_tssd, %edi
#endif
2:
	movl	$GPROC0_SEL, %esi
	/* move correct tss descriptor into GDT slot, then reload tr */
	leal	_gdt(,%esi,8), %ebx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ebx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)
	shll	$3, %esi			/* GSEL(entry, SEL_KPL) */
	ltr	%si
3:
#endif /* VM86 */
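	/*
	 * The descriptor shuffle above is, roughly (a sketch):
	 *
	 *	gdt[GPROC0_SEL] = *tssd;	copy 8-byte TSS descriptor
	 *	ltr(GSEL(GPROC0_SEL, SEL_KPL));	reload the task register
	 *
	 * ltr must be redone because TR caches its descriptor; editing
	 * the GDT slot alone would not take effect.
	 */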
	movl	P_VMSPACE(%ecx), %ebx
#ifdef SMP
	movl	_cpuid, %eax
#else
	xorl	%eax, %eax
#endif
	btsl	%eax, VM_PMAP+PM_ACTIVE(%ebx)

	/* restore context */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)

#ifdef SMP
#ifdef GRAB_LOPRIO				/* hold LOPRIO for INTs */
#ifdef CHEAP_TPR
	movl	$0, lapic_tpr
#else
	andl	$~APIC_TPR_PRIO, lapic_tpr
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
	movl	_cpuid,%eax
	movb	%al, P_ONCPU(%ecx)
#endif /* SMP */
	movl	%edx, _curpcb
	movl	%ecx, _curproc			/* into next process */

#ifdef SMP
	movl	_cpu_lockid, %eax
	orl	PCB_MPNEST(%edx), %eax		/* add next count from PROC */
	movl	%eax, _mp_lock			/* load the mp_lock */
	/* XXX FIXME: we should be restoring the local APIC TPR */
#endif /* SMP */

#ifdef	USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	__default_ldt,%eax
	cmpl	_currentldt,%eax
	je	2f
	lldt	__default_ldt
	movl	%eax,_currentldt
	jmp	2f
1:	pushl	%edx
	call	_set_user_ldt
	popl	%edx
2:
#endif

	/* This must be done after loading the user LDT. */
	.globl	cpu_switch_load_gs
cpu_switch_load_gs:
	movl	PCB_GS(%edx),%gs

	sti
	ret

CROSSJUMPTARGET(idqr)
CROSSJUMPTARGET(nortqr)
CROSSJUMPTARGET(sw1a)

#ifdef DIAGNOSTIC
badsw1:
	pushl	$sw0_1
	call	_panic

sw0_1:	.asciz	"cpu_switch: has wchan"

badsw2:
	pushl	$sw0_2
	call	_panic

sw0_2:	.asciz	"cpu_switch: not SRUN"
#endif

#if defined(SMP) && defined(DIAGNOSTIC)
badsw4:
	pushl	$sw0_4
	call	_panic

sw0_4:	.asciz	"cpu_switch: do not have lock"
#endif /* SMP && DIAGNOSTIC */

/*
 * savectx(pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)

#if NNPX > 0
	/*
	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxproc != NULL, then we have to save the npx h/w state to
	 * npxproc's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
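	/*
	 * I.e., roughly (a sketch; struct save87 per machine/npx.h):
	 *
	 *	if (npxproc != NULL) {
	 *		struct save87 *sfp;
	 *
	 *		sfp = &npxproc->p_addr->u_pcb.pcb_savefpu;
	 *		npxsave(sfp);
	 *		bcopy(sfp, &pcb->pcb_savefpu, sizeof(*sfp));
	 *	}
	 */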
	movl	_npxproc,%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx
	movl	P_ADDR(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax
	pushl	%eax
	call	_npxsave
	addl	$4,%esp
	popl	%eax
	popl	%ecx

	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	_bcopy
	addl	$12,%esp
#endif	/* NNPX > 0 */

1:
	ret