/* machdep.c revision 1045 */
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 *	$Id: machdep.c,v 1.29 1994/01/21 09:56:05 davidg Exp $
 */
#include "npx.h"
#include "isa.h"

#include <stddef.h>
#include "param.h"
#include "systm.h"
#include "signalvar.h"
#include "kernel.h"
#include "map.h"
#include "proc.h"
#include "user.h"
#include "exec.h"            /* for PS_STRINGS */
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "file.h"
#include "callout.h"
#include "malloc.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "net/netisr.h"

#ifdef SYSVSHM
#include "sys/shm.h"
#endif

#ifdef SYSVMSG
#include "msg.h"
#endif

#ifdef SYSVSEM
#include "sem.h"
#endif

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "sys/exec.h"
#include "sys/vnode.h"

extern vm_offset_t avail_start, avail_end;

#include "machine/cpu.h"
#include "machine/reg.h"
#include "machine/psl.h"
#include "machine/specialreg.h"
#include "machine/sysarch.h"
#include "machine/cons.h"

#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"
static void identifycpu(void);
static void initcpu(void);

/*
 * Seconds to wait after a panic dump before automatically rebooting;
 * 0 means reboot immediately, -1 means wait for a console keypress
 * (see boot() below).
 */
#ifndef PANIC_REBOOT_WAIT_TIME
#define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
#endif
100
/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;		/* swap buffer headers; 0 = auto-size in cpu_startup() */
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;		/* buffer headers; 0 = auto-size in cpu_startup() */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;		/* pages of buffer memory; 0 = auto-size */
#endif
extern int freebufspace;

/* user data/code segment selectors; loaded into new processes by setregs() */
int _udatasel, _ucodesel;

/*
 * Machine-dependent startup code
 */
int boothowto = 0, Maxmem = 0;	/* RB_* boot flags; highest physical page (see init386) */
long dumplo;			/* block offset of the crash dump on the dump device */
int physmem, maxmem;		/* pages of physical memory; highest usable page */
extern int bootdev;
#ifdef SMALL
extern int forcemaxmem;
#endif
int biosmem;

vm_offset_t	phys_avail[6];	/* usable physical memory ranges; filled in later (init386) */

extern cyloffset;		/* NOTE(review): relies on implicit int */

int cpu_class;			/* CPUCLASS_*; set by identifycpu() */

void dumpsys __P((void));
138
139void
140cpu_startup()
141{
142	register int unixsize;
143	register unsigned i;
144	register struct pte *pte;
145	int mapaddr, j;
146	register caddr_t v;
147	int maxbufs, base, residual;
148	extern long Usrptsize;
149	vm_offset_t minaddr, maxaddr;
150	vm_size_t size = 0;
151	int firstaddr;
152
153	/*
154	 * Initialize error message buffer (at end of core).
155	 */
156
157	/* avail_end was pre-decremented in pmap_bootstrap to compensate */
158	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
159		pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp,
160			   avail_end + i * NBPG,
161			   VM_PROT_ALL, TRUE);
162	msgbufmapped = 1;
163
164	/*
165	 * Good {morning,afternoon,evening,night}.
166	 */
167	printf(version);
168	identifycpu();
169	printf("real mem  = %d\n", ctob(physmem));
170
171	/*
172	 * Allocate space for system data structures.
173	 * The first available kernel virtual address is in "v".
174	 * As pages of kernel virtual memory are allocated, "v" is incremented.
175	 * As pages of memory are allocated and cleared,
176	 * "firstaddr" is incremented.
177	 * An index into the kernel page table corresponding to the
178	 * virtual memory address maintained in "v" is kept in "mapaddr".
179	 */
180
181	/*
182	 * Make two passes.  The first pass calculates how much memory is
183	 * needed and allocates it.  The second pass assigns virtual
184	 * addresses to the various data structures.
185	 */
186	firstaddr = 0;
187again:
188	v = (caddr_t)firstaddr;
189
190#define	valloc(name, type, num) \
191	    (name) = (type *)v; v = (caddr_t)((name)+(num))
192#define	valloclim(name, type, num, lim) \
193	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
194	valloc(callout, struct callout, ncallout);
195#ifdef SYSVSHM
196	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
197#endif
198#ifdef SYSVSEM
199	valloc(sema, struct semid_ds, seminfo.semmni);
200	valloc(sem, struct sem, seminfo.semmns);
201	/* This is pretty disgusting! */
202	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
203#endif
204#ifdef SYSVMSG
205	valloc(msgpool, char, msginfo.msgmax);
206	valloc(msgmaps, struct msgmap, msginfo.msgseg);
207	valloc(msghdrs, struct msg, msginfo.msgtql);
208	valloc(msqids, struct msqid_ds, msginfo.msgmni);
209#endif
210	/*
211	 * Determine how many buffers to allocate.
212	 * Use 20% of memory of memory beyond the first 2MB
213	 * Insure a minimum of 16 fs buffers.
214	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
215	 */
216	if (bufpages == 0)
217		bufpages = ((physmem << PGSHIFT) - 2048*1024) / NBPG / 5;
218	if (bufpages < 64)
219		bufpages = 64;
220
221	/*
222	 * We must still limit the maximum number of buffers to be no
223	 * more than 2/5's of the size of the kernal malloc region, this
224	 * will only take effect for machines with lots of memory
225	 */
226	bufpages = min(bufpages, (VM_KMEM_SIZE / NBPG) * 2 / 5);
227	if (nbuf == 0) {
228		nbuf = bufpages / 2;
229		if (nbuf < 32)
230			nbuf = 32;
231	}
232	freebufspace = bufpages * NBPG;
233	if (nswbuf == 0) {
234		nswbuf = (nbuf / 2) &~ 1;	/* force even */
235		if (nswbuf > 256)
236			nswbuf = 256;		/* sanity */
237	}
238	valloc(swbuf, struct buf, nswbuf);
239	valloc(buf, struct buf, nbuf);
240
241	/*
242	 * End of first pass, size has been calculated so allocate memory
243	 */
244	if (firstaddr == 0) {
245		size = (vm_size_t)(v - firstaddr);
246		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
247		if (firstaddr == 0)
248			panic("startup: no room for tables");
249		goto again;
250	}
251	/*
252	 * End of second pass, addresses have been assigned
253	 */
254	if ((vm_size_t)(v - firstaddr) != size)
255		panic("startup: table size inconsistency");
256
257	/*
258	 * Allocate a submap for buffer space allocations.
259	 * XXX we are NOT using buffer_map, but due to
260	 * the references to it we will just allocate 1 page of
261	 * vm (not real memory) to make things happy...
262	 */
263	buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
264				/* bufpages * */NBPG, TRUE);
265	/*
266	 * Allocate a submap for physio
267	 */
268	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
269				 VM_PHYS_SIZE, TRUE);
270
271	/*
272	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
273	 * we use the more space efficient malloc in place of kmem_alloc.
274	 */
275	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
276				   M_MBUF, M_NOWAIT);
277	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
278	mb_map = kmem_suballoc(kmem_map, (vm_offset_t)&mbutl, &maxaddr,
279			       VM_MBUF_SIZE, FALSE);
280	/*
281	 * Initialize callouts
282	 */
283	callfree = callout;
284	for (i = 1; i < ncallout; i++)
285		callout[i-1].c_next = &callout[i];
286
287	printf("avail mem = %d\n", ptoa(vm_page_free_count));
288	printf("using %d buffers containing %d bytes of memory\n",
289		nbuf, bufpages * CLBYTES);
290
291	/*
292	 * Set up CPU-specific registers, cache, etc.
293	 */
294	initcpu();
295
296	/*
297	 * Set up buffers, so they can be used to read disk labels.
298	 */
299	bufinit();
300
301	/*
302	 * Configure the system.
303	 */
304	configure();
305}
306
307
/*
 * Map a probed CPU type (the global `cpu', used as an index) to a
 * printable name and a CPUCLASS_* code; consumed by identifycpu().
 * Entries must stay in the order of the CPU_* codes noted at right.
 */
struct cpu_nameclass i386_cpus[] = {
	{ "Intel 80286",	CPUCLASS_286 },		/* CPU_286   */
	{ "i386SX",		CPUCLASS_386 },		/* CPU_386SX */
	{ "i386DX",		CPUCLASS_386 },		/* CPU_386   */
	{ "i486SX",		CPUCLASS_486 },		/* CPU_486SX */
	{ "i486DX",		CPUCLASS_486 },		/* CPU_486   */
	{ "i586",		CPUCLASS_586 },		/* CPU_586   */
};
316
/*
 * Print the CPU model and class at boot, set `cpu_class', and panic
 * if the kernel was not compiled with support for this CPU class.
 * `cpu' is presumably set during early (assembly) boot -- its origin
 * is not visible in this file chunk.
 */
static void
identifycpu()
{
	printf("CPU: ");
	if (cpu >= 0 && cpu < (sizeof i386_cpus/sizeof(struct cpu_nameclass))) {
		printf("%s", i386_cpus[cpu].cpu_name);
		cpu_class = i386_cpus[cpu].cpu_class;
	} else {
		printf("unknown cpu type %d\n", cpu);
		panic("startup: bad cpu id");
	}
	printf(" (");
	switch(cpu_class) {
	case CPUCLASS_286:
		printf("286");
		break;
	case CPUCLASS_386:
		printf("386");
		break;
	case CPUCLASS_486:
		printf("486");
		break;
	case CPUCLASS_586:
		printf("586");
		break;
	default:
		printf("unknown");	/* will panic below... */
	}
	printf("-class CPU)");
	printf("\n");	/* cpu speed would be nice, but how? */

	/*
	 * Now that we have told the user what they have,
	 * let them know if that machine type isn't configured.
	 *
	 * Each class whose I*_CPU option is NOT configured keeps its
	 * case label below and falls through into the panic; configured
	 * classes have their label compiled out and exit via `default'.
	 */
	switch (cpu_class) {
	case CPUCLASS_286:	/* a 286 should not make it this far, anyway */
#if !defined(I386_CPU) && !defined(I486_CPU) && !defined(I586_CPU)
#error This kernel is not configured for one of the supported CPUs
#endif
#if !defined(I386_CPU)
	case CPUCLASS_386:
#endif
#if !defined(I486_CPU)
	case CPUCLASS_486:
#endif
#if !defined(I586_CPU)
	case CPUCLASS_586:
#endif
		panic("CPU class not configured");
	default:
		break;
	}
}
371
#ifdef PGINPROF
/*
 * Return the difference (in microseconds) between the current time
 * and a previous time represented by (otime, olbolt).  One lbolt
 * tick is 1/60 s == 16667 us.  oicr is accepted but unused here
 * (the comment about returning an error code for a pending clock
 * interrupt describes behavior this implementation does not have).
 */
/*ARGSUSED*/
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{
	register int dsecs, dticks;

	dsecs = time.tv_sec - otime;
	dticks = lbolt - olbolt;
	return ((dsecs * 60 + dticks) * 16667);
}
#endif
389
extern int kstack[];	/* proc0/kernel stack page; its pcb holds pcb_sigc below */

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct proc *p = curproc;
	register int *regs;
	register struct sigframe *fp;
	struct sigacts *ps = p->p_sigacts;
	int oonstack, frmtrap;	/* NOTE(review): frmtrap is never used */

	regs = p->p_regs;
        oonstack = ps->ps_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	/* Use the alternate signal stack if configured and not already on it. */
        if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(ps->ps_sigsp
				- sizeof(struct sigframe));
                ps->ps_onstack = 1;
	} else {
		fp = (struct sigframe *)(regs[tESP]
			- sizeof(struct sigframe));
	}

	if (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}

	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = &fp->sf_sc;
	fp->sf_addr = (char *) regs[tERR];
	fp->sf_handler = catcher;

	/* save scratch registers */
	fp->sf_eax = regs[tEAX];
	fp->sf_edx = regs[tEDX];
	fp->sf_ecx = regs[tECX];

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	fp->sf_sc.sc_onstack = oonstack;
	fp->sf_sc.sc_mask = mask;
	fp->sf_sc.sc_sp = regs[tESP];
	fp->sf_sc.sc_fp = regs[tEBP];
	fp->sf_sc.sc_pc = regs[tEIP];
	fp->sf_sc.sc_ps = regs[tEFLAGS];
	/* Point the user stack at the frame; resume in the signal trampoline. */
	regs[tESP] = (int)fp;
	regs[tEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
}
472
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper priviledges or to cause
 * a machine fault.
 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;	/* user address of the sigcontext */
};

int
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct sigframe *fp;
	register int *regs = p->p_regs;

	/*
	 * (XXX old comment) regs[tESP] points to the return address.
	 * The user scp pointer is above that.
	 * The return address is faked in the signal trampoline code
	 * for consistency.
	 */
	scp = uap->sigcntxp;
	/* Recover the enclosing sigframe from the user's sigcontext pointer. */
	fp = (struct sigframe *)
	     ((caddr_t)scp - offsetof(struct sigframe, sf_sc));

	if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0)
		return(EINVAL);

	/* restore scratch registers */
	regs[tEAX] = fp->sf_eax ;
	regs[tEDX] = fp->sf_edx ;
	regs[tECX] = fp->sf_ecx ;

	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
		return(EINVAL);
#ifdef notyet
	/* Reject eflags values that would grant improper privilege. */
	if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) {
		return(EINVAL);
	}
#endif
        p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
	/* SIGKILL, SIGCONT and SIGSTOP can never be blocked. */
	p->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[tEBP] = scp->sc_fp;
	regs[tESP] = scp->sc_sp;
	regs[tEIP] = scp->sc_pc;
	regs[tEFLAGS] = scp->sc_ps;
	return(EJUSTRETURN);
}
531
/*
 * a simple function to make the system panic (and dump a vmcore)
 * in a predictable fashion
 */
void diediedie()
{
	/* Deliberate, unconditional panic -- see comment above. */
	panic("because you said to!");
}
540
int	waittime = -1;		/* <0 until boot() has started syncing disks */
struct pcb dumppcb;		/* context saved by boot() for the crash dump */
543
/*
 * Reboot or halt the machine.  `arghowto' is a mask of RB_* flags
 * (<sys/reboot.h>): sync dirty buffers unless RB_NOSYNC, halt and
 * wait for a key if RB_HALT, take a crash dump first if RB_DUMP.
 */
void
boot(arghowto)
	int arghowto;
{
	register long dummy;		/* r12 is reserved */
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	extern int cold;		/* nonzero before autoconfiguration completes */
	int nomsg = 1;			/* print the "updating disks" banner only once */

	if (cold) {
		/* Too early to reboot cleanly; spin until the user resets. */
		printf("hit reset please");
		for(;;);
	}
	howto = arghowto;
	/* Sync once only, and only if the buffer cache has been set up. */
	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);
		sync(curproc, NULL, NULL);
		/*
		 * Unmount filesystems
		 */
#if 0
		if (panicstr == 0)
			vfs_unmountall();
#endif

		/* Wait, with increasing backoff, for busy buffers to drain. */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			if (nomsg) {
				printf("updating disks before rebooting... ");
				nomsg = 0;
			}
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		DELAY(10000);			/* wait for printf to finish */
	}
	splhigh();
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	} else {
		if (howto & RB_DUMP) {
			/* Save our context so the dump reflects this point. */
			savectx(&dumppcb, 0);
			dumppcb.pcb_ptd = rcr3();
			dumpsys();

			/* PANIC_REBOOT_WAIT_TIME: 0 = now, -1 = wait for key. */
			if (PANIC_REBOOT_WAIT_TIME != 0) {
				if (PANIC_REBOOT_WAIT_TIME != -1) {
					int loop;
					printf("Automatic reboot in %d seconds - press a key on the console to abort\n",
						PANIC_REBOOT_WAIT_TIME);
					for (loop = PANIC_REBOOT_WAIT_TIME; loop > 0; --loop) {
						DELAY(1000 * 1000); /* one second */
						if (sgetc(1)) /* Did user type a key? */
							break;
					}
					if (!loop)
						goto die;
				}
			} else { /* zero time specified - reboot NOW */
				goto die;
			}
			printf("--> Press a key on the console to reboot <--\n");
			cngetc();
		}
	}
#ifdef lint
	dummy = 0; dummy = dummy;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
die:
	printf("Rebooting...\n");
	DELAY (100000);	/* wait 100ms for printf's to complete */
	cpu_reset();
	for(;;) ;
	/*NOTREACHED*/
}
644
unsigned long	dumpmag = 0x8fca0101UL;	/* magic number for savecore */
int		dumpsize = 0;		/* pages dumped; set by dumpsys(), also for savecore */
647/*
648 * Doadump comes here after turning off memory management and
649 * getting on the dump stack, either when called above, or by
650 * the auto-restart code.
651 */
652void
653dumpsys()
654{
655
656	if (dumpdev == NODEV)
657		return;
658	if ((minor(dumpdev)&07) != 1)
659		return;
660	dumpsize = physmem;
661	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
662	printf("dump ");
663	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
664
665	case ENXIO:
666		printf("device bad\n");
667		break;
668
669	case EFAULT:
670		printf("device not ready\n");
671		break;
672
673	case EINVAL:
674		printf("area improper\n");
675		break;
676
677	case EIO:
678		printf("i/o error\n");
679		break;
680
681	case EINTR:
682		printf("aborted from console\n");
683		break;
684
685	default:
686		printf("succeeded\n");
687		break;
688	}
689}
690
#ifdef HZ
/*
 * If HZ is defined we use this code, otherwise the code in
 * /sys/i386/i386/microtime.s is used.  The other code only works
 * for HZ=100.
 *
 * Fill in *tvp with the current time of day: the time of the last
 * clock tick plus `tick' microseconds, normalized to keep tv_usec
 * in [0, 1000000).
 */
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();	/* keep `time' consistent while we copy it */

	*tvp = time;
	tvp->tv_usec += tick;
	/*
	 * Normalize with ">=": the original "> 1000000" test could
	 * leave tv_usec exactly equal to 1000000, an out-of-range
	 * microsecond value.
	 */
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	splx(s);
}
#endif /* HZ */
711
712void
713physstrat(bp, strat, prio)
714	struct buf *bp;
715	int (*strat)(), prio;
716{
717	register int s;
718	caddr_t baddr;
719
720	vmapbuf(bp);
721	(*strat)(bp);
722	/* pageout daemon doesn't wait for pushed pages */
723	if (bp->b_flags & B_DIRTY)
724		return;
725	s = splbio();
726	while ((bp->b_flags & B_DONE) == 0)
727	  tsleep((caddr_t)bp, prio, "physstr", 0);
728	splx(s);
729	vunmapbuf(bp);
730}
731
static void
initcpu()
{
	/*
	 * Intentionally empty: a hook called from cpu_startup() for
	 * model-specific register/cache setup; nothing is needed here yet.
	 */
}
736
/*
 * Clear registers on exec: give the process a pristine register set,
 * starting execution at `entry' with the user stack at `stack' and
 * the standard user code/data selectors, and discard any FPU state.
 */
void
setregs(p, entry, stack)
	struct proc *p;
	u_long entry;
	u_long stack;
{
	p->p_regs[tEBP] = 0;	/* bottom of the fp chain */
	p->p_regs[tEIP] = entry;	/* entry point of the new image */
	p->p_regs[tESP] = stack;	/* top of the new user stack */
	p->p_regs[tSS] = _udatasel;
	p->p_regs[tDS] = _udatasel;
	p->p_regs[tES] = _udatasel;
	p->p_regs[tCS] = _ucodesel;

	p->p_addr->u_pcb.pcb_flags = 0;	/* no fp at all */
	/* First FPU instruction will trap (CR0_TS) and reinitialize state. */
	load_cr0(rcr0() | CR0_TS);	/* start emulating */
#if	NNPX > 0
	npxinit(__INITIAL_NPXCW__);
#endif	/* NNPX > 0 */
}
760
/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 *
 * Each hardware descriptor is 8 bytes; the G*_SEL constants below
 * are slot numbers into the `gdt' array, and the L*_SEL constants
 * index the `ldt' array.
 */
#define DESCRIPTOR_SIZE	8

#define	GNULL_SEL	0	/* Null Descriptor */
#define	GCODE_SEL	1	/* Kernel Code Descriptor */
#define	GDATA_SEL	2	/* Kernel Data Descriptor */
#define	GLDT_SEL	3	/* LDT - eventually one per process */
#define	GTGATE_SEL	4	/* Process task switch gate */
#define	GPANIC_SEL	5	/* Task state to consider panic from */
#define	GPROC0_SEL	6	/* Task state process slot zero and up */
#define NGDT 	GPROC0_SEL+1

unsigned char gdt[GPROC0_SEL+1][DESCRIPTOR_SIZE];	/* hardware GDT (NGDT slots) */

/* interrupt descriptor table */
struct gate_descriptor idt[NIDT];

/* local descriptor table */
unsigned char ldt[5][DESCRIPTOR_SIZE];
#define	LSYS5CALLS_SEL	0	/* forced by intel BCS */
#define	LSYS5SIGR_SEL	1

#define	L43BSDCALLS_SEL	2	/* notyet */
#define	LUCODE_SEL	3
#define	LUDATA_SEL	4
/* separate stack, es,fs,gs sels ? */
/* #define	LPOSIXCALLS_SEL	5*/	/* notyet */

struct	i386tss	tss, panic_tss;	/* proc0 and panic task state segments */

extern  struct user *proc0paddr;	/* u-area of process 0 */
798
/* software prototypes -- in more palatable form */
/*
 * Entries must stay in step with the G*_SEL slot numbers defined
 * above; init386() packs each one into the hardware `gdt' array
 * with ssdtosd() before loading it via lgdt().
 */
struct soft_segment_descriptor gdt_segs[] = {
	/* Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* LDT Descriptor */
{	(int) ldt,			/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - Placeholder */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Panic Tss Descriptor */
{	(int) &panic_tss,		/* segment base address  */
	sizeof(tss)-1,		/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Proc 0 Tss Descriptor */
{	(int) kstack,			/* segment base address  */
	sizeof(tss)-1,		/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ }};
864
/*
 * Soft descriptors for the process LDT, indexed by the L*_SEL
 * constants above.  init386() converts these with ssdtosd() and
 * adjusts the user code/data limits (VM_END_USER_*) there.
 */
struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ } };
911
912void
913setidt(idx, func, typ, dpl)
914	int idx;
915	void (*func)();
916	int typ;
917	int dpl;
918{
919	struct gate_descriptor *ip = idt + idx;
920
921	ip->gd_looffset = (int)func;
922	ip->gd_selector = 8;
923	ip->gd_stkcpy = 0;
924	ip->gd_xx = 0;
925	ip->gd_type = typ;
926	ip->gd_dpl = dpl;
927	ip->gd_p = 1;
928	ip->gd_hioffset = ((int)func)>>16 ;
929}
930
/* Build the external (assembly) entry-point name, Xname, for a vector. */
#define	IDTVEC(name)	__CONCAT(X, name)
typedef void idtvec_t();

/*
 * Exception entry points, defined elsewhere (assembly stubs not
 * visible in this chunk).
 * NOTE(review): IDTVEC(rsvd14) appears twice at the end of this list;
 * the second occurrence was probably meant to be rsvd15.  Harmless as
 * written -- a repeated extern declaration is redundant but legal.
 */
extern idtvec_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
	IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
	IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
	IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
	IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(rsvd14), IDTVEC(syscall);

int _gsel_tss;	/* TSS selector; presumably set in init386() (beyond this chunk) */
945
946void
947init386(first)
948	int first;
949{
950	extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
951	int x, *pi;
952	unsigned biosbasemem, biosextmem;
953	struct gate_descriptor *gdp;
954	extern int sigcode,szsigcode;
955	/* table descriptors - used to load tables by microp */
956	unsigned short	r_gdt[3], r_idt[3];
957	int	pagesinbase, pagesinext;
958
959
960	proc0.p_addr = proc0paddr;
961
962	/*
963	 * Initialize the console before we print anything out.
964	 */
965
966	cninit ();
967
968	/*
969	 * make gdt memory segments, the code segment goes up to end of the
970	 * page with etext in it, the data segment goes to the end of
971	 * the address space
972	 */
973	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(i386_round_page(&etext)) - 1;
974	gdt_segs[GDATA_SEL].ssd_limit = 0xffffffffUL;	/* XXX constant? */
975	for (x=0; x < NGDT; x++) ssdtosd(gdt_segs+x, gdt+x);
976	/* make ldt memory segments */
977	/*
978	 * The data segment limit must not cover the user area because we
979	 * don't want the user area to be writable in copyout() etc. (page
980	 * level protection is lost in kernel mode on 386's).  Also, we
981	 * don't want the user area to be writable directly (page level
982	 * protection of the user area is not available on 486's with
983	 * CR0_WP set, because there is no user-read/kernel-write mode).
984	 *
985	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
986	 * should be spelled ...MAX_USER...
987	 */
988#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
989	/*
990	 * The code segment limit has to cover the user area until we move
991	 * the signal trampoline out of the user area.  This is safe because
992	 * the code segment cannot be written to directly.
993	 */
994#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * NBPG)
995	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
996	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
997	/* Note. eventually want private ldts per process */
998	for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);
999
1000	/* exceptions */
1001	setidt(0, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL);
1002	setidt(1, &IDTVEC(dbg),  SDT_SYS386TGT, SEL_KPL);
1003	setidt(2, &IDTVEC(nmi),  SDT_SYS386TGT, SEL_KPL);
1004 	setidt(3, &IDTVEC(bpt),  SDT_SYS386TGT, SEL_UPL);
1005	setidt(4, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_KPL);
1006	setidt(5, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL);
1007	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL);
1008	setidt(7, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL);
1009	setidt(8, &IDTVEC(dble),  SDT_SYS386TGT, SEL_KPL);
1010	setidt(9, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL);
1011	setidt(10, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL);
1012	setidt(11, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL);
1013	setidt(12, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL);
1014	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL);
1015	setidt(14, &IDTVEC(page),  SDT_SYS386TGT, SEL_KPL);
1016	setidt(15, &IDTVEC(rsvd),  SDT_SYS386TGT, SEL_KPL);
1017	setidt(16, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL);
1018	setidt(17, &IDTVEC(rsvd0),  SDT_SYS386TGT, SEL_KPL);
1019	setidt(18, &IDTVEC(rsvd1),  SDT_SYS386TGT, SEL_KPL);
1020	setidt(19, &IDTVEC(rsvd2),  SDT_SYS386TGT, SEL_KPL);
1021	setidt(20, &IDTVEC(rsvd3),  SDT_SYS386TGT, SEL_KPL);
1022	setidt(21, &IDTVEC(rsvd4),  SDT_SYS386TGT, SEL_KPL);
1023	setidt(22, &IDTVEC(rsvd5),  SDT_SYS386TGT, SEL_KPL);
1024	setidt(23, &IDTVEC(rsvd6),  SDT_SYS386TGT, SEL_KPL);
1025	setidt(24, &IDTVEC(rsvd7),  SDT_SYS386TGT, SEL_KPL);
1026	setidt(25, &IDTVEC(rsvd8),  SDT_SYS386TGT, SEL_KPL);
1027	setidt(26, &IDTVEC(rsvd9),  SDT_SYS386TGT, SEL_KPL);
1028	setidt(27, &IDTVEC(rsvd10),  SDT_SYS386TGT, SEL_KPL);
1029	setidt(28, &IDTVEC(rsvd11),  SDT_SYS386TGT, SEL_KPL);
1030	setidt(29, &IDTVEC(rsvd12),  SDT_SYS386TGT, SEL_KPL);
1031	setidt(30, &IDTVEC(rsvd13),  SDT_SYS386TGT, SEL_KPL);
1032	setidt(31, &IDTVEC(rsvd14),  SDT_SYS386TGT, SEL_KPL);
1033
1034#include	"isa.h"
1035#if	NISA >0
1036	isa_defaultirq();
1037#endif
1038
1039	r_gdt[0] = (unsigned short) (sizeof(gdt) - 1);
1040	r_gdt[1] = (unsigned short) ((int) gdt & 0xffff);
1041	r_gdt[2] = (unsigned short) ((int) gdt >> 16);
1042	lgdt(&r_gdt);
1043	r_idt[0] = (unsigned short) (sizeof(idt) - 1);
1044	r_idt[1] = (unsigned short) ((int) idt & 0xfffff);
1045	r_idt[2] = (unsigned short) ((int) idt >> 16);
1046	lidt(&r_idt);
1047	lldt(GSEL(GLDT_SEL, SEL_KPL));
1048
1049#include "ddb.h"
1050#if NDDB > 0
1051	kdb_init();
1052	if (boothowto & RB_KDB)
1053		Debugger("Boot flags requested debugger");
1054#endif
1055
1056	/* Use BIOS values stored in RTC CMOS RAM, since probing
1057	 * breaks certain 386 AT relics.
1058	 */
1059	biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
1060	biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
1061
1062	/*
1063	 * If BIOS tells us that it has more than 640k in the basemem,
1064	 *	don't believe it - set it to 640k.
1065	 */
1066	if (biosbasemem > 640)
1067		biosbasemem = 640;
1068
1069	/*
1070	 * Some 386 machines might give us a bogus number for extended
1071	 *	mem. If this happens, stop now.
1072	 */
1073#ifndef LARGEMEM
1074	if (biosextmem > 65536) {
1075		panic("extended memory beyond limit of 64MB");
1076		/* NOT REACHED */
1077	}
1078#endif
1079
1080	pagesinbase = biosbasemem * 1024 / NBPG;
1081	pagesinext = biosextmem * 1024 / NBPG;
1082
1083	/*
1084	 * Special hack for chipsets that still remap the 384k hole when
1085	 *	there's 16MB of memory - this really confuses people that
1086	 *	are trying to use bus mastering ISA controllers with the
1087	 *	"16MB limit"; they only have 16MB, but the remapping puts
1088	 *	them beyond the limit.
1089	 * XXX - this should be removed when bounce buffers are
1090	 *	implemented.
1091	 */
1092	/*
1093	 * If extended memory is between 15-16MB (16-17MB phys address range),
1094	 *	chop it to 15MB.
1095	 */
1096	if ((pagesinext > 3840) && (pagesinext < 4096))
1097		pagesinext = 3840;
1098
1099	/*
1100	 * Maxmem isn't the "maximum memory", it's the highest page of
1101	 * of the physical address space. It should be "Maxphyspage".
1102	 */
1103	Maxmem = pagesinext + 0x100000/NBPG;
1104
1105#ifdef MAXMEM
1106	if (MAXMEM/4 < Maxmem)
1107		Maxmem = MAXMEM/4;
1108#endif
1109	maxmem = Maxmem - 1;	/* highest page of usable memory */
1110	physmem = maxmem;	/* number of pages of physmem addr space */
1111
1112	if (Maxmem < 2048/4) {
1113		panic("Too little memory (2MB required)");
1114		/* NOT REACHED */
1115	}
1116
1117	/* call pmap initialization to make new kernel address space */
1118	pmap_bootstrap (first, 0);
1119
1120	/*
1121	 * Initialize pointers to the two chunks of memory; for use
1122	 *	later in vm_page_startup.
1123	 */
1124	/* avail_start and avail_end are initialized in pmap_bootstrap */
1125	x = 0;
1126	if (pagesinbase > 1) {
1127		phys_avail[x++] = NBPG;		/* skip first page of memory */
1128		phys_avail[x++] = pagesinbase * NBPG;	/* memory up to the ISA hole */
1129	}
1130	phys_avail[x++] = avail_start;	/* memory up to the end */
1131	phys_avail[x++] = avail_end;
1132	phys_avail[x++] = 0;		/* no more chunks */
1133	phys_avail[x++] = 0;
1134
1135	/* now running on new page tables, configured,and u/iom is accessible */
1136
1137	/* make a initial tss so microp can get interrupt stack on syscall! */
1138	proc0.p_addr->u_pcb.pcb_tss.tss_esp0 = (int) kstack + UPAGES*NBPG;
1139	proc0.p_addr->u_pcb.pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
1140	_gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1141
1142	((struct i386tss *)gdt_segs[GPROC0_SEL].ssd_base)->tss_ioopt =
1143		(sizeof(tss))<<16;
1144
1145	ltr(_gsel_tss);
1146
1147	/* make a call gate to reenter kernel with */
1148	gdp = (struct gate_descriptor *) &ldt[LSYS5CALLS_SEL][0];
1149
1150	x = (int) &IDTVEC(syscall);
1151	gdp->gd_looffset = x++;
1152	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
1153	gdp->gd_stkcpy = 1;
1154	gdp->gd_type = SDT_SYS386CGT;
1155	gdp->gd_dpl = SEL_UPL;
1156	gdp->gd_p = 1;
1157	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;
1158
1159	/* transfer to user mode */
1160
1161	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
1162	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);
1163
1164	/* setup proc 0's pcb */
1165	bcopy(&sigcode, proc0.p_addr->u_pcb.pcb_sigc, szsigcode);
1166	proc0.p_addr->u_pcb.pcb_flags = 0;
1167	proc0.p_addr->u_pcb.pcb_ptd = IdlePTD;
1168}
1169
1170/*aston() {
1171	schednetisr(NETISR_AST);
1172}*/
1173
void
setsoftclock() {
	/*
	 * Request a soft clock interrupt by scheduling it through the
	 * netisr soft-interrupt mechanism (NETISR_SCLK bit).
	 */
	schednetisr(NETISR_SCLK);
}
1178
1179/*
1180 * insert an element into a queue
1181 */
1182#undef insque
1183void				/* XXX replace with inline FIXME! */
1184_insque(element, head)
1185	register struct prochd *element, *head;
1186{
1187	element->ph_link = head->ph_link;
1188	head->ph_link = (struct proc *)element;
1189	element->ph_rlink = (struct proc *)head;
1190	((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
1191}
1192
1193/*
1194 * remove an element from a queue
1195 */
1196#undef remque
1197void				/* XXX replace with inline FIXME! */
1198_remque(element)
1199	register struct prochd *element;
1200{
1201	((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
1202	((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
1203	element->ph_rlink = (struct proc *)0;
1204}
1205
1206/*
1207 * The registers are in the frame; the frame is in the user area of
1208 * the process in question; when the process is active, the registers
1209 * are in "the kernel stack"; when it's not, they're still there, but
1210 * things get flipped around.  So, since p->p_regs is the whole address
1211 * of the register set, take its offset from the kernel stack, and
1212 * index into the user block.  Don't you just *love* virtual memory?
1213 * (I'm starting to think seymour is right...)
1214 */
1215
1216int
1217ptrace_set_pc (struct proc *p, unsigned int addr) {
1218	void *regs = (char*)p->p_addr +
1219		((char*) p->p_regs - (char*) kstack);
1220
1221	((struct trapframe *)regs)->tf_eip = addr;
1222	return 0;
1223}
1224
1225int
1226ptrace_single_step (struct proc *p) {
1227	void *regs = (char*)p->p_addr +
1228		((char*) p->p_regs - (char*) kstack);
1229
1230	((struct trapframe *)regs)->tf_eflags |= PSL_T;
1231	return 0;
1232}
1233
1234/*
1235 * Copy the registers to user-space.
1236 */
1237
1238int
1239ptrace_getregs (struct proc *p, unsigned int *addr) {
1240	int error;
1241	struct regs regs = {0};
1242
1243	if (error = fill_regs (p, &regs))
1244		return error;
1245
1246	return copyout (&regs, addr, sizeof (regs));
1247}
1248
1249int
1250ptrace_setregs (struct proc *p, unsigned int *addr) {
1251	int error;
1252	struct regs regs = {0};
1253
1254	if (error = copyin (addr, &regs, sizeof(regs)))
1255		return error;
1256
1257	return set_regs (p, &regs);
1258}
1259
1260int
1261fill_regs(struct proc *p, struct regs *regs) {
1262	int error;
1263	struct trapframe *tp;
1264	void *ptr = (char*)p->p_addr +
1265		((char*) p->p_regs - (char*) kstack);
1266
1267	tp = ptr;
1268	regs->r_es = tp->tf_es;
1269	regs->r_ds = tp->tf_ds;
1270	regs->r_edi = tp->tf_edi;
1271	regs->r_esi = tp->tf_esi;
1272	regs->r_ebp = tp->tf_ebp;
1273	regs->r_ebx = tp->tf_ebx;
1274	regs->r_edx = tp->tf_edx;
1275	regs->r_ecx = tp->tf_ecx;
1276	regs->r_eax = tp->tf_eax;
1277	regs->r_eip = tp->tf_eip;
1278	regs->r_cs = tp->tf_cs;
1279	regs->r_eflags = tp->tf_eflags;
1280	regs->r_esp = tp->tf_esp;
1281	regs->r_ss = tp->tf_ss;
1282	return 0;
1283}
1284
1285int
1286set_regs (struct proc *p, struct regs *regs) {
1287	int error;
1288	struct trapframe *tp;
1289	void *ptr = (char*)p->p_addr +
1290		((char*) p->p_regs - (char*) kstack);
1291
1292	tp = ptr;
1293	tp->tf_es = regs->r_es;
1294	tp->tf_ds = regs->r_ds;
1295	tp->tf_edi = regs->r_edi;
1296	tp->tf_esi = regs->r_esi;
1297	tp->tf_ebp = regs->r_ebp;
1298	tp->tf_ebx = regs->r_ebx;
1299	tp->tf_edx = regs->r_edx;
1300	tp->tf_ecx = regs->r_ecx;
1301	tp->tf_eax = regs->r_eax;
1302	tp->tf_eip = regs->r_eip;
1303	tp->tf_cs = regs->r_cs;
1304	tp->tf_eflags = regs->r_eflags;
1305	tp->tf_esp = regs->r_esp;
1306	tp->tf_ss = regs->r_ss;
1307	return 0;
1308}
1309
1310#include "ddb.h"
1311#if NDDB <= 0
/*
 * Stub used when the kernel is configured without DDB: report the
 * request on the console instead of entering a debugger.
 */
void
Debugger(const char *msg)
{
	/* Newline terminates the console line (was missing). */
	printf("Debugger(\"%s\") called.\n", msg);
}
1317#endif /* no DDB */
1318