machdep.c revision 987
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 *	$Id: machdep.c,v 1.25 1994/01/14 16:23:35 davidg Exp $
 */

#include "npx.h"
#include "isa.h"

#include <stddef.h>
#include "param.h"
#include "systm.h"
#include "signalvar.h"
#include "kernel.h"
#include "map.h"
#include "proc.h"
#include "user.h"
#include "exec.h"            /* for PS_STRINGS */
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "file.h"
#include "callout.h"
#include "malloc.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "net/netisr.h"

#ifdef SYSVSHM
#include "sys/shm.h"
#endif

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "sys/exec.h"
#include "sys/vnode.h"

extern vm_offset_t avail_start, avail_end;

#include "machine/cpu.h"
#include "machine/reg.h"
#include "machine/psl.h"
#include "machine/specialreg.h"
#include "machine/sysarch.h"
#include "machine/cons.h"

#include "i386/isa/isa.h"
#include "i386/isa/rtc.h"

static void identifycpu(void);
static void initcpu(void);

#ifndef PANIC_REBOOT_WAIT_TIME
#define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
#endif

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif
extern int freebufspace;

int _udatasel, _ucodesel;

/*
 * Machine-dependent startup code
 */
int boothowto = 0, Maxmem = 0;
long dumplo;
int physmem, maxmem;
extern int bootdev;
#ifdef SMALL
extern int forcemaxmem;
#endif
int biosmem;

vm_offset_t	phys_avail[6];

extern cyloffset;

int cpu_class;

void dumpsys __P((void));

void
cpu_startup()
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j;
	register caddr_t v;
	int maxbufs, base, residual;
	extern long Usrptsize;
	vm_offset_t minaddr, maxaddr;
	vm_size_t size = 0;
	int firstaddr;

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* avail_end was pre-decremented in pmap_bootstrap to compensate */
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp,
			   avail_end + i * NBPG,
			   VM_PROT_ALL, TRUE);
	msgbufmapped = 1;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	printf("real mem  = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

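	/*
	 * valloc() carves space for "num" objects of "type" out of the
	 * region tracked by "v"; valloclim() also records the end of the
	 * array in "lim".  On the first pass v starts at zero, so the
	 * final value of v is simply the total size to allocate.
	 */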
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
/*	valloc(cfree, struct cblock, nclist);  no clists any more!!! - cgd */
	valloc(callout, struct callout, ncallout);
#ifdef NetBSD
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#endif
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
	/*
	 * Determine how many buffers to allocate.
	 * Use 20% of memory beyond the first 2MB.
	 * Ensure a minimum of 16 fs buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
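	/*
	 * Worked example (assuming 4K pages): on an 8MB machine this is
	 * (8MB - 2MB) / NBPG / 5 = 307 pages, i.e. about 1.2MB of buffers
	 * before the limits below are applied.
	 */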
	if (bufpages == 0)
		bufpages = ((physmem << PGSHIFT) - 2048*1024) / NBPG / 5;
	if (bufpages < 64)
		bufpages = 64;

	/*
	 * We must still limit the maximum number of buffers to be no
	 * more than 2/5's of the size of the kernel malloc region; this
	 * will only take effect for machines with lots of memory.
	 */
	bufpages = min(bufpages, (VM_KMEM_SIZE / NBPG) * 2 / 5);
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 32)
			nbuf = 32;
	}
	freebufspace = bufpages * NBPG;
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}
	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	/*
	 * Allocate a submap for buffer space allocations.
	 * XXX we are NOT using buffer_map, but due to
	 * the references to it we will just allocate 1 page of
	 * vm (not real memory) to make things happy...
	 */
	buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				/* bufpages * */NBPG, TRUE);
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
/*	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *				16*NCARGS, TRUE);
 *	NOT CURRENTLY USED -- cgd
 */
	/*
	 * Allocate a submap for physio
	 */
	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 VM_PHYS_SIZE, TRUE);

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
				   M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kmem_map, (vm_offset_t)&mbutl, &maxaddr,
			       VM_MBUF_SIZE, FALSE);
	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	printf("avail mem = %d\n", ptoa(vm_page_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	configure();
}


struct cpu_nameclass i386_cpus[] = {
	{ "Intel 80286",	CPUCLASS_286 },		/* CPU_286   */
	{ "i386SX",		CPUCLASS_386 },		/* CPU_386SX */
	{ "i386DX",		CPUCLASS_386 },		/* CPU_386   */
	{ "i486SX",		CPUCLASS_486 },		/* CPU_486SX */
	{ "i486DX",		CPUCLASS_486 },		/* CPU_486   */
	{ "i586",		CPUCLASS_586 },		/* CPU_586   */
};

static void
identifycpu()	/* translated from hp300 -- cgd */
{
	printf("CPU: ");
	if (cpu >= 0 && cpu < (sizeof i386_cpus/sizeof(struct cpu_nameclass))) {
		printf("%s", i386_cpus[cpu].cpu_name);
		cpu_class = i386_cpus[cpu].cpu_class;
	} else {
		printf("unknown cpu type %d\n", cpu);
		panic("startup: bad cpu id");
	}
	printf(" (");
	switch(cpu_class) {
	case CPUCLASS_286:
		printf("286");
		break;
	case CPUCLASS_386:
		printf("386");
		break;
	case CPUCLASS_486:
		printf("486");
		break;
	case CPUCLASS_586:
		printf("586");
		break;
	default:
		printf("unknown");	/* will panic below... */
	}
	printf("-class CPU)");
	printf("\n");	/* cpu speed would be nice, but how? */

	/*
	 * Now that we have told the user what they have,
	 * let them know if that machine type isn't configured.
	 */
	switch (cpu_class) {
	case CPUCLASS_286:	/* a 286 should not make it this far, anyway */
#if !defined(I386_CPU) && !defined(I486_CPU) && !defined(I586_CPU)
#error This kernel is not configured for one of the supported CPUs
#endif
#if !defined(I386_CPU)
	case CPUCLASS_386:
#endif
#if !defined(I486_CPU)
	case CPUCLASS_486:
#endif
#if !defined(I586_CPU)
	case CPUCLASS_586:
#endif
		panic("CPU class not configured");
	default:
		break;
	}
}

#ifdef PGINPROF
/*
 * Return the difference (in microseconds)
 * between the current time and a previous
 * time as represented by the arguments.
 * If there is a pending clock interrupt
 * which has not been serviced due to high
 * ipl, return error code.
 */
/*ARGSUSED*/
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

	return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
}
#endif

extern int kstack[];

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
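/*
 * sendsig() builds a "struct sigframe" on the user's stack (or on the
 * signal stack, if one was requested) holding the handler arguments
 * (sf_signum, sf_code, sf_scp, sf_addr, sf_handler), the caller's
 * scratch registers (eax, edx, ecx), and a sigcontext (sf_sc) that
 * sigreturn() below uses to restore the interrupted state.
 */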
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct proc *p = curproc;
	register int *regs;
	register struct sigframe *fp;
	struct sigacts *ps = p->p_sigacts;
	int oonstack, frmtrap;

	regs = p->p_regs;
	oonstack = ps->ps_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(ps->ps_sigsp
				- sizeof(struct sigframe));
		ps->ps_onstack = 1;
	} else {
		fp = (struct sigframe *)(regs[tESP]
			- sizeof(struct sigframe));
	}

	if (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}

	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = &fp->sf_sc;
	fp->sf_addr = (char *) regs[tERR];
	fp->sf_handler = catcher;

	/* save scratch registers */
	fp->sf_eax = regs[tEAX];
	fp->sf_edx = regs[tEDX];
	fp->sf_ecx = regs[tECX];

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	fp->sf_sc.sc_onstack = oonstack;
	fp->sf_sc.sc_mask = mask;
	fp->sf_sc.sc_sp = regs[tESP];
	fp->sf_sc.sc_fp = regs[tEBP];
	fp->sf_sc.sc_pc = regs[tEIP];
	fp->sf_sc.sc_ps = regs[tEFLAGS];
	regs[tESP] = (int)fp;
	regs[tEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;
};

int
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct sigframe *fp;
	register int *regs = p->p_regs;

	/*
	 * (XXX old comment) regs[tESP] points to the return address.
	 * The user scp pointer is above that.
	 * The return address is faked in the signal trampoline code
	 * for consistency.
	 */
	scp = uap->sigcntxp;
	fp = (struct sigframe *)
	     ((caddr_t)scp - offsetof(struct sigframe, sf_sc));

	if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0)
		return(EINVAL);

	/* restore scratch registers */
	regs[tEAX] = fp->sf_eax;
	regs[tEDX] = fp->sf_edx;
	regs[tECX] = fp->sf_ecx;

	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
		return(EINVAL);
#ifdef notyet
	if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) {
		return(EINVAL);
	}
#endif
	p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
	p->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[tEBP] = scp->sc_fp;
	regs[tESP] = scp->sc_sp;
	regs[tEIP] = scp->sc_pc;
	regs[tEFLAGS] = scp->sc_ps;
	return(EJUSTRETURN);
}

/*
 * a simple function to make the system panic (and dump a vmcore)
 * in a predictable fashion
 */
void diediedie()
{
	panic("because you said to!");
}

int	waittime = -1;
struct pcb dumppcb;

void
boot(arghowto)
	int arghowto;
{
	register long dummy;		/* r12 is reserved */
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	extern int cold;
	int nomsg = 1;

	if (cold) {
		printf("hit reset please");
		for(;;);
	}
	howto = arghowto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);
		sync((struct sigcontext *)0);
		/*
		 * Unmount filesystems
		 */
#if 0
		if (panicstr == 0)
			vfs_unmountall();
#endif

		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			if (nomsg) {
				printf("updating disks before rebooting... ");
				nomsg = 0;
			}
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		DELAY(10000);			/* wait for printf to finish */
	}
	splhigh();
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	} else {
		if (howto & RB_DUMP) {
			savectx(&dumppcb, 0);
			dumppcb.pcb_ptd = rcr3();
			dumpsys();

			if (PANIC_REBOOT_WAIT_TIME != 0) {
				if (PANIC_REBOOT_WAIT_TIME != -1) {
					int loop;
					printf("Automatic reboot in %d seconds - press a key on the console to abort\n",
						PANIC_REBOOT_WAIT_TIME);
					for (loop = PANIC_REBOOT_WAIT_TIME; loop > 0; --loop) {
						DELAY(1000 * 1000); /* one second */
						if (sgetc(1)) /* Did user type a key? */
							break;
					}
					if (!loop)
						goto die;
				}
			} else { /* zero time specified - reboot NOW */
				goto die;
			}
			printf("--> Press a key on the console to reboot <--\n");
			cngetc();
		}
	}
#ifdef lint
	dummy = 0; dummy = dummy;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
die:
	printf("Rebooting...\n");
	DELAY (100000);	/* wait 100ms for printf's to complete */
	cpu_reset();
	for(;;) ;
	/*NOTREACHED*/
}

unsigned long	dumpmag = 0x8fca0101UL;	/* magic number for savecore */
int		dumpsize = 0;		/* also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
void
dumpsys()
{

	if (dumpdev == NODEV)
		return;
	if ((minor(dumpdev)&07) != 1)
		return;
	dumpsize = physmem;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	default:
		printf("succeeded\n");
		break;
	}
}

#ifdef HZ
/*
 * If HZ is defined we use this code, otherwise the code in
 * /sys/i386/i386/microtime.s is used.  The other code only works
 * for HZ=100.
 */
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();

	*tvp = time;
	tvp->tv_usec += tick;
	while (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	splx(s);
}
#endif /* HZ */

void
physstrat(bp, strat, prio)
	struct buf *bp;
	int (*strat)(), prio;
{
	register int s;
	caddr_t baddr;

	vmapbuf(bp);
	(*strat)(bp);
	/* pageout daemon doesn't wait for pushed pages */
	if (bp->b_flags & B_DIRTY)
		return;
	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep((caddr_t)bp, prio, "physstr", 0);
	splx(s);
	vunmapbuf(bp);
}

static void
initcpu()
{
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack)
	struct proc *p;
	u_long entry;
	u_long stack;
{
	p->p_regs[tEBP] = 0;	/* bottom of the fp chain */
	p->p_regs[tEIP] = entry;
	p->p_regs[tESP] = stack;
	p->p_regs[tSS] = _udatasel;
	p->p_regs[tDS] = _udatasel;
	p->p_regs[tES] = _udatasel;
	p->p_regs[tCS] = _ucodesel;

	p->p_addr->u_pcb.pcb_flags = 0;	/* no fp at all */
	load_cr0(rcr0() | CR0_TS);	/* start emulating */
#if	NNPX > 0
	npxinit(__INITIAL_NPXCW__);
#endif	/* NNPX > 0 */
}

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */
#define DESCRIPTOR_SIZE	8

#define	GNULL_SEL	0	/* Null Descriptor */
#define	GCODE_SEL	1	/* Kernel Code Descriptor */
#define	GDATA_SEL	2	/* Kernel Data Descriptor */
#define	GLDT_SEL	3	/* LDT - eventually one per process */
#define	GTGATE_SEL	4	/* Process task switch gate */
#define	GPANIC_SEL	5	/* Task state to consider panic from */
#define	GPROC0_SEL	6	/* Task state process slot zero and up */
#define NGDT 	GPROC0_SEL+1

unsigned char gdt[GPROC0_SEL+1][DESCRIPTOR_SIZE];

/* interrupt descriptor table */
struct gate_descriptor idt[NIDT];

/* local descriptor table */
unsigned char ldt[5][DESCRIPTOR_SIZE];
#define	LSYS5CALLS_SEL	0	/* forced by intel BCS */
#define	LSYS5SIGR_SEL	1

#define	L43BSDCALLS_SEL	2	/* notyet */
#define	LUCODE_SEL	3
#define	LUDATA_SEL	4
/* separate stack, es, fs, gs sels? */
/* #define	LPOSIXCALLS_SEL	5*/	/* notyet */

struct	i386tss	tss, panic_tss;

extern  struct user *proc0paddr;

/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
	/* Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* LDT Descriptor */
{	(int) ldt,			/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - Placeholder */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Panic Tss Descriptor */
{	(int) &panic_tss,		/* segment base address  */
	sizeof(tss)-1,		/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Proc 0 Tss Descriptor */
{	(int) kstack,			/* segment base address  */
	sizeof(tss)-1,		/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ }};

struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ } };

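/*
 * setidt() fills IDT slot "idx" with a gate that transfers to "func"
 * through the kernel code segment (selector 8 == GSEL(GCODE_SEL, SEL_KPL))
 * using the given gate type and descriptor privilege level.
 */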
void
setidt(idx, func, typ, dpl)
	int idx;
	void (*func)();
	int typ;
	int dpl;
{
	struct gate_descriptor *ip = idt + idx;

	ip->gd_looffset = (int)func;
	ip->gd_selector = 8;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16;
}

#define	IDTVEC(name)	__CONCAT(X, name)
typedef void idtvec_t();

extern idtvec_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
	IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
	IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
	IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
	IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(syscall);

int _gsel_tss;

void
init386(first)
	int first;
{
	extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
	int x, *pi;
	unsigned biosbasemem, biosextmem;
	struct gate_descriptor *gdp;
	extern int sigcode,szsigcode;
	/* table descriptors - used to load tables by microp */
	unsigned short	r_gdt[3], r_idt[3];
	int	pagesinbase, pagesinext;


	proc0.p_addr = proc0paddr;

	/*
	 * Initialize the console before we print anything out.
	 */

	cninit ();

	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(i386_round_page(&etext)) - 1;
	gdt_segs[GDATA_SEL].ssd_limit = 0xffffffffUL;	/* XXX constant? */
	for (x=0; x < NGDT; x++) ssdtosd(gdt_segs+x, gdt+x);
	/* make ldt memory segments */
	/*
	 * The data segment limit must not cover the user area because we
	 * don't want the user area to be writable in copyout() etc. (page
	 * level protection is lost in kernel mode on 386's).  Also, we
	 * don't want the user area to be writable directly (page level
	 * protection of the user area is not available on 486's with
	 * CR0_WP set, because there is no user-read/kernel-write mode).
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
	/*
	 * The code segment limit has to cover the user area until we move
	 * the signal trampoline out of the user area.  This is safe because
	 * the code segment cannot be written to directly.
	 */
#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * NBPG)
	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
	/* Note. eventually want private ldts per process */
	for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);

	/* exceptions */
	setidt(0, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL);
	setidt(1, &IDTVEC(dbg),  SDT_SYS386TGT, SEL_KPL);
	setidt(2, &IDTVEC(nmi),  SDT_SYS386TGT, SEL_KPL);
	setidt(3, &IDTVEC(bpt),  SDT_SYS386TGT, SEL_UPL);
	setidt(4, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_KPL);
	setidt(5, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL);
	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL);
	setidt(7, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL);
	setidt(8, &IDTVEC(dble),  SDT_SYS386TGT, SEL_KPL);
	setidt(9, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL);
	setidt(10, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL);
	setidt(11, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL);
	setidt(12, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL);
	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL);
	setidt(14, &IDTVEC(page),  SDT_SYS386TGT, SEL_KPL);
	setidt(15, &IDTVEC(rsvd),  SDT_SYS386TGT, SEL_KPL);
	setidt(16, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL);
	setidt(17, &IDTVEC(rsvd0),  SDT_SYS386TGT, SEL_KPL);
	setidt(18, &IDTVEC(rsvd1),  SDT_SYS386TGT, SEL_KPL);
	setidt(19, &IDTVEC(rsvd2),  SDT_SYS386TGT, SEL_KPL);
	setidt(20, &IDTVEC(rsvd3),  SDT_SYS386TGT, SEL_KPL);
	setidt(21, &IDTVEC(rsvd4),  SDT_SYS386TGT, SEL_KPL);
	setidt(22, &IDTVEC(rsvd5),  SDT_SYS386TGT, SEL_KPL);
	setidt(23, &IDTVEC(rsvd6),  SDT_SYS386TGT, SEL_KPL);
	setidt(24, &IDTVEC(rsvd7),  SDT_SYS386TGT, SEL_KPL);
	setidt(25, &IDTVEC(rsvd8),  SDT_SYS386TGT, SEL_KPL);
	setidt(26, &IDTVEC(rsvd9),  SDT_SYS386TGT, SEL_KPL);
	setidt(27, &IDTVEC(rsvd10),  SDT_SYS386TGT, SEL_KPL);
	setidt(28, &IDTVEC(rsvd11),  SDT_SYS386TGT, SEL_KPL);
	setidt(29, &IDTVEC(rsvd12),  SDT_SYS386TGT, SEL_KPL);
	setidt(30, &IDTVEC(rsvd13),  SDT_SYS386TGT, SEL_KPL);
	setidt(31, &IDTVEC(rsvd14),  SDT_SYS386TGT, SEL_KPL);

#include	"isa.h"
#if	NISA >0
	isa_defaultirq();
#endif

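	/*
	 * Load the descriptor table registers.  lgdt/lidt take a 6-byte
	 * pseudo-descriptor: word 0 is the table limit (size - 1), words
	 * 1 and 2 are the low and high halves of the 32-bit linear base.
	 */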
	r_gdt[0] = (unsigned short) (sizeof(gdt) - 1);
	r_gdt[1] = (unsigned short) ((int) gdt & 0xffff);
	r_gdt[2] = (unsigned short) ((int) gdt >> 16);
	lgdt(&r_gdt);
	r_idt[0] = (unsigned short) (sizeof(idt) - 1);
	r_idt[1] = (unsigned short) ((int) idt & 0xffff);
	r_idt[2] = (unsigned short) ((int) idt >> 16);
	lidt(&r_idt);
	lldt(GSEL(GLDT_SEL, SEL_KPL));

#include "ddb.h"
#if NDDB > 0
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	/* Use BIOS values stored in RTC CMOS RAM, since probing
	 * breaks certain 386 AT relics.
	 */
	biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
	biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);

	/*
	 * If BIOS tells us that it has more than 640k in the basemem,
	 *	don't believe it - set it to 640k.
	 */
	if (biosbasemem > 640)
		biosbasemem = 640;

	/*
	 * Some 386 machines might give us a bogus number for extended
	 *	mem. If this happens, stop now.
	 */
#ifndef LARGEMEM
	if (biosextmem > 65536) {
		panic("extended memory beyond limit of 64MB");
		/* NOT REACHED */
	}
#endif

	pagesinbase = biosbasemem * 1024 / NBPG;
	pagesinext = biosextmem * 1024 / NBPG;

	/*
	 * Maxmem isn't the "maximum memory", it's the highest page of
	 * the physical address space. It should be "Maxphyspage".
	 */
	Maxmem = pagesinext + 0x100000/NBPG;

#ifdef MAXMEM
	if (MAXMEM/4 < Maxmem)
		Maxmem = MAXMEM/4;
#endif
	maxmem = Maxmem - 1;	/* highest page of usable memory */
	physmem = maxmem;	/* number of pages of physmem addr space */

	if (Maxmem < 2048/4) {
		panic("Too little memory (2MB required)");
		/* NOT REACHED */
	}

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap (first, 0);

	/*
	 * Initialize pointers to the two chunks of memory; for use
	 *	later in vm_page_startup.
	 */
	/* avail_start and avail_end are initialized in pmap_bootstrap */
	x = 0;
	if (pagesinbase > 1) {
		phys_avail[x++] = NBPG;		/* skip first page of memory */
		phys_avail[x++] = pagesinbase * NBPG;	/* memory up to the ISA hole */
	}
	phys_avail[x++] = avail_start;	/* memory up to the end */
	phys_avail[x++] = avail_end;
	phys_avail[x++] = 0;		/* no more chunks */
	phys_avail[x++] = 0;

	/* now running on new page tables, configured, and u/iom is accessible */

	/* make an initial tss so the processor can get an interrupt stack on syscall! */
	proc0.p_addr->u_pcb.pcb_tss.tss_esp0 = (int) kstack + UPAGES*NBPG;
	proc0.p_addr->u_pcb.pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	_gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);

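	/*
	 * Set the I/O permission bitmap base (kept in the upper half of
	 * tss_ioopt) past the end of the TSS, so no I/O bitmap is present
	 * and user I/O port accesses always trap.
	 */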
	((struct i386tss *)gdt_segs[GPROC0_SEL].ssd_base)->tss_ioopt =
		(sizeof(tss))<<16;

	ltr(_gsel_tss);

	/* make a call gate to reenter kernel with */
	gdp = (struct gate_descriptor *) &ldt[LSYS5CALLS_SEL][0];

	x = (int) &IDTVEC(syscall);
	gdp->gd_looffset = x++;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
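	/* one longword of arguments is copied from the user stack on the gate call */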
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	bcopy(&sigcode, proc0.p_addr->u_pcb.pcb_sigc, szsigcode);
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_ptd = IdlePTD;
}

extern struct pte	*CMAP1, *CMAP2;
extern caddr_t		CADDR1, CADDR2;
/*
 * zero out physical memory
 * specified in relocation units (NBPG bytes)
 */
void
clearseg(n)
	int n;
{

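	/*
	 * Point the CMAP2 pte at physical page n, reload %cr3 to flush the
	 * stale mapping, then zero the page through the CADDR2 window.
	 */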
	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
	load_cr3(rcr3());
	bzero(CADDR2,NBPG);
	*(int *) CADDR2 = 0;
}

/*
 * copy a page of physical memory
 * specified in relocation units (NBPG bytes)
 */
void
copyseg(frm, n)
	int frm;
	int n;
{

	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
	load_cr3(rcr3());
	bcopy((void *)frm, (void *)CADDR2, NBPG);
}

/*
 * copy a page of physical memory
 * specified in relocation units (NBPG bytes)
 */
void
physcopyseg(frm, to)
	int frm;
	int to;
{

	*(int *)CMAP1 = PG_V | PG_KW | ctob(frm);
	*(int *)CMAP2 = PG_V | PG_KW | ctob(to);
	load_cr3(rcr3());
	bcopy(CADDR1, CADDR2, NBPG);
}

/*aston() {
	schednetisr(NETISR_AST);
}*/

void
setsoftclock() {
	schednetisr(NETISR_SCLK);
}

/*
 * insert an element into a queue
 */
#undef insque
void				/* XXX replace with inline FIXME! */
_insque(element, head)
	register struct prochd *element, *head;
{
	element->ph_link = head->ph_link;
	head->ph_link = (struct proc *)element;
	element->ph_rlink = (struct proc *)head;
	((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
}

/*
 * remove an element from a queue
 */
#undef remque
void				/* XXX replace with inline FIXME! */
_remque(element)
	register struct prochd *element;
{
	((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
	((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
	element->ph_rlink = (struct proc *)0;
}

/*
 * The registers are in the frame; the frame is in the user area of
 * the process in question; when the process is active, the registers
 * are in "the kernel stack"; when it's not, they're still there, but
 * things get flipped around.  So, since p->p_regs is the whole address
 * of the register set, take its offset from the kernel stack, and
 * index into the user block.  Don't you just *love* virtual memory?
 * (I'm starting to think seymour is right...)
 */

int
ptrace_set_pc (struct proc *p, unsigned int addr) {
	void *regs = (char*)p->p_addr +
		((char*) p->p_regs - (char*) kstack);

	((struct trapframe *)regs)->tf_eip = addr;
	return 0;
}

int
ptrace_single_step (struct proc *p) {
	void *regs = (char*)p->p_addr +
		((char*) p->p_regs - (char*) kstack);

	((struct trapframe *)regs)->tf_eflags |= PSL_T;
	return 0;
}

/*
 * Copy the registers to user-space.
 */

int
ptrace_getregs (struct proc *p, unsigned int *addr) {
	int error;
	struct regs regs = {0};

	if (error = fill_regs (p, &regs))
		return error;

	return copyout (&regs, addr, sizeof (regs));
}

int
ptrace_setregs (struct proc *p, unsigned int *addr) {
	int error;
	struct regs regs = {0};

	if (error = copyin (addr, &regs, sizeof(regs)))
		return error;

	return set_regs (p, &regs);
}

int
fill_regs(struct proc *p, struct regs *regs) {
	int error;
	struct trapframe *tp;
	void *ptr = (char*)p->p_addr +
		((char*) p->p_regs - (char*) kstack);

	tp = ptr;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	return 0;
}

int
set_regs (struct proc *p, struct regs *regs) {
	int error;
	struct trapframe *tp;
	void *ptr = (char*)p->p_addr +
		((char*) p->p_regs - (char*) kstack);

	tp = ptr;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	return 0;
}

#include "ddb.h"
#if NDDB <= 0
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */
1342