machdep.c revision 988
1/*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by the University of
20 *	California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
38 *	$Id: machdep.c,v 1.26 1994/01/20 17:21:28 davidg Exp $
39 */
40
41#include "npx.h"
42#include "isa.h"
43
44#include <stddef.h>
45#include "param.h"
46#include "systm.h"
47#include "signalvar.h"
48#include "kernel.h"
49#include "map.h"
50#include "proc.h"
51#include "user.h"
52#include "exec.h"            /* for PS_STRINGS */
53#include "buf.h"
54#include "reboot.h"
55#include "conf.h"
56#include "file.h"
57#include "callout.h"
58#include "malloc.h"
59#include "mbuf.h"
60#include "msgbuf.h"
61#include "net/netisr.h"
62
63#ifdef SYSVSHM
64#include "sys/shm.h"
65#endif
66
67#include "vm/vm.h"
68#include "vm/vm_kern.h"
69#include "vm/vm_page.h"
70
71#include "sys/exec.h"
72#include "sys/vnode.h"
73
74extern vm_offset_t avail_start, avail_end;
75
76#include "machine/cpu.h"
77#include "machine/reg.h"
78#include "machine/psl.h"
79#include "machine/specialreg.h"
80#include "machine/sysarch.h"
81#include "machine/cons.h"
82
83#include "i386/isa/isa.h"
84#include "i386/isa/rtc.h"
85
static void identifycpu(void);
static void initcpu(void);

#ifndef PANIC_REBOOT_WAIT_TIME
#define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
#endif

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;		/* swap buffer headers; 0 = auto-size in cpu_startup() */
#ifdef	NBUF
int	nbuf = NBUF;		/* file I/O buffer headers (config override) */
#else
int	nbuf = 0;		/* 0 = auto-size from bufpages in cpu_startup() */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;	/* pages of buffer-cache memory (config override) */
#else
int	bufpages = 0;		/* 0 = auto-size from physical memory in cpu_startup() */
#endif
extern int freebufspace;

int _udatasel, _ucodesel;	/* user data/code selectors, loaded in setregs() */

/*
 * Machine-dependent startup code
 */
int boothowto = 0, Maxmem = 0;	/* RB_* boot flags; highest physical page + 1 */
long dumplo;			/* block offset of crash dump on dump device */
int physmem, maxmem;		/* pages of physical memory; highest usable page */
extern int bootdev;
#ifdef SMALL
extern int forcemaxmem;
#endif
int biosmem;

/* start/end pairs of usable physical memory, terminated by a zero pair */
vm_offset_t	phys_avail[6];

extern cyloffset;		/* implicit int (historical) */

int cpu_class;			/* CPUCLASS_* value, set by identifycpu() */

void dumpsys __P((void));
130
131void
132cpu_startup()
133{
134	register int unixsize;
135	register unsigned i;
136	register struct pte *pte;
137	int mapaddr, j;
138	register caddr_t v;
139	int maxbufs, base, residual;
140	extern long Usrptsize;
141	vm_offset_t minaddr, maxaddr;
142	vm_size_t size = 0;
143	int firstaddr;
144
145	/*
146	 * Initialize error message buffer (at end of core).
147	 */
148
149	/* avail_end was pre-decremented in pmap_bootstrap to compensate */
150	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
151		pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp,
152			   avail_end + i * NBPG,
153			   VM_PROT_ALL, TRUE);
154	msgbufmapped = 1;
155
156	/*
157	 * Good {morning,afternoon,evening,night}.
158	 */
159	printf(version);
160	identifycpu();
161	printf("real mem  = %d\n", ctob(physmem));
162
163	/*
164	 * Allocate space for system data structures.
165	 * The first available kernel virtual address is in "v".
166	 * As pages of kernel virtual memory are allocated, "v" is incremented.
167	 * As pages of memory are allocated and cleared,
168	 * "firstaddr" is incremented.
169	 * An index into the kernel page table corresponding to the
170	 * virtual memory address maintained in "v" is kept in "mapaddr".
171	 */
172
173	/*
174	 * Make two passes.  The first pass calculates how much memory is
175	 * needed and allocates it.  The second pass assigns virtual
176	 * addresses to the various data structures.
177	 */
178	firstaddr = 0;
179again:
180	v = (caddr_t)firstaddr;
181
182#define	valloc(name, type, num) \
183	    (name) = (type *)v; v = (caddr_t)((name)+(num))
184#define	valloclim(name, type, num, lim) \
185	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
186	valloc(callout, struct callout, ncallout);
187#ifdef SYSVSHM
188	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
189#endif
190	/*
191	 * Determine how many buffers to allocate.
192	 * Use 20% of memory of memory beyond the first 2MB
193	 * Insure a minimum of 16 fs buffers.
194	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
195	 */
196	if (bufpages == 0)
197		bufpages = ((physmem << PGSHIFT) - 2048*1024) / NBPG / 5;
198	if (bufpages < 64)
199		bufpages = 64;
200
201	/*
202	 * We must still limit the maximum number of buffers to be no
203	 * more than 2/5's of the size of the kernal malloc region, this
204	 * will only take effect for machines with lots of memory
205	 */
206	bufpages = min(bufpages, (VM_KMEM_SIZE / NBPG) * 2 / 5);
207	if (nbuf == 0) {
208		nbuf = bufpages / 2;
209		if (nbuf < 32)
210			nbuf = 32;
211	}
212	freebufspace = bufpages * NBPG;
213	if (nswbuf == 0) {
214		nswbuf = (nbuf / 2) &~ 1;	/* force even */
215		if (nswbuf > 256)
216			nswbuf = 256;		/* sanity */
217	}
218	valloc(swbuf, struct buf, nswbuf);
219	valloc(buf, struct buf, nbuf);
220
221	/*
222	 * End of first pass, size has been calculated so allocate memory
223	 */
224	if (firstaddr == 0) {
225		size = (vm_size_t)(v - firstaddr);
226		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
227		if (firstaddr == 0)
228			panic("startup: no room for tables");
229		goto again;
230	}
231	/*
232	 * End of second pass, addresses have been assigned
233	 */
234	if ((vm_size_t)(v - firstaddr) != size)
235		panic("startup: table size inconsistency");
236
237	/*
238	 * Allocate a submap for buffer space allocations.
239	 * XXX we are NOT using buffer_map, but due to
240	 * the references to it we will just allocate 1 page of
241	 * vm (not real memory) to make things happy...
242	 */
243	buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
244				/* bufpages * */NBPG, TRUE);
245	/*
246	 * Allocate a submap for physio
247	 */
248	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
249				 VM_PHYS_SIZE, TRUE);
250
251	/*
252	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
253	 * we use the more space efficient malloc in place of kmem_alloc.
254	 */
255	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
256				   M_MBUF, M_NOWAIT);
257	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
258	mb_map = kmem_suballoc(kmem_map, (vm_offset_t)&mbutl, &maxaddr,
259			       VM_MBUF_SIZE, FALSE);
260	/*
261	 * Initialize callouts
262	 */
263	callfree = callout;
264	for (i = 1; i < ncallout; i++)
265		callout[i-1].c_next = &callout[i];
266
267	printf("avail mem = %d\n", ptoa(vm_page_free_count));
268	printf("using %d buffers containing %d bytes of memory\n",
269		nbuf, bufpages * CLBYTES);
270
271	/*
272	 * Set up CPU-specific registers, cache, etc.
273	 */
274	initcpu();
275
276	/*
277	 * Set up buffers, so they can be used to read disk labels.
278	 */
279	bufinit();
280
281	/*
282	 * Configure the system.
283	 */
284	configure();
285}
286
287
/*
 * Table mapping the boot-time CPU_* id (used as the index) to a
 * printable name and a CPUCLASS_* value; consulted by identifycpu().
 */
struct cpu_nameclass i386_cpus[] = {
	{ "Intel 80286",	CPUCLASS_286 },		/* CPU_286   */
	{ "i386SX",		CPUCLASS_386 },		/* CPU_386SX */
	{ "i386DX",		CPUCLASS_386 },		/* CPU_386   */
	{ "i486SX",		CPUCLASS_486 },		/* CPU_486SX */
	{ "i486DX",		CPUCLASS_486 },		/* CPU_486   */
	{ "i586",		CPUCLASS_586 },		/* CPU_586   */
};
296
/*
 * Look up the CPU id (the "cpu" variable, set during early boot --
 * not visible in this file) in i386_cpus[], print the CPU name and
 * class on the console, record cpu_class, and panic if the kernel
 * was not configured for this CPU class.
 */
static void
identifycpu()
{
	printf("CPU: ");
	/* bounds-check the id against the name table before indexing */
	if (cpu >= 0 && cpu < (sizeof i386_cpus/sizeof(struct cpu_nameclass))) {
		printf("%s", i386_cpus[cpu].cpu_name);
		cpu_class = i386_cpus[cpu].cpu_class;
	} else {
		printf("unknown cpu type %d\n", cpu);
		panic("startup: bad cpu id");
	}
	printf(" (");
	switch(cpu_class) {
	case CPUCLASS_286:
		printf("286");
		break;
	case CPUCLASS_386:
		printf("386");
		break;
	case CPUCLASS_486:
		printf("486");
		break;
	case CPUCLASS_586:
		printf("586");
		break;
	default:
		printf("unknown");	/* will panic below... */
	}
	printf("-class CPU)");
	printf("\n");	/* cpu speed would be nice, but how? */

	/*
	 * Now that we have told the user what they have,
	 * let them know if that machine type isn't configured.
	 * Each case label that is compiled in falls through to the
	 * panic only when its I*_CPU option is NOT configured.
	 */
	switch (cpu_class) {
	case CPUCLASS_286:	/* a 286 should not make it this far, anyway */
#if !defined(I386_CPU) && !defined(I486_CPU) && !defined(I586_CPU)
#error This kernel is not configured for one of the supported CPUs
#endif
#if !defined(I386_CPU)
	case CPUCLASS_386:
#endif
#if !defined(I486_CPU)
	case CPUCLASS_486:
#endif
#if !defined(I586_CPU)
	case CPUCLASS_586:
#endif
		panic("CPU class not configured");
	default:
		break;
	}
}
351
#ifdef PGINPROF
/*
 * Return the difference (in microseconds)
 * between the  current time and a previous
 * time as represented  by the arguments.
 * If there is a pending clock interrupt
 * which has not been serviced due to high
 * ipl, return error code.
 *
 * NOTE(review): no error code is actually returned by this body, and
 * "oicr" is never used.  The conversion assumes a 60 Hz lbolt with
 * 16667 us per tick -- confirm against the clock configuration.
 */
/*ARGSUSED*/
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

	return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
}
#endif
369
extern int kstack[];	/* kernel stack; the pcb lives at its base (cast in sendsig below) */
371
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;		/* user-mode handler address */
	int sig, mask;		/* signal number; mask to restore at sigreturn */
	unsigned code;		/* machine code passed through to the handler */
{
	register struct proc *p = curproc;
	register int *regs;
	register struct sigframe *fp;
	struct sigacts *ps = p->p_sigacts;
	int oonstack, frmtrap;	/* NOTE(review): frmtrap is never used */

	regs = p->p_regs;
        oonstack = ps->ps_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
        if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
		/* deliver on the alternate signal stack */
		fp = (struct sigframe *)(ps->ps_sigsp
				- sizeof(struct sigframe));
                ps->ps_onstack = 1;
	} else {
		/* deliver on the current user stack */
		fp = (struct sigframe *)(regs[tESP]
			- sizeof(struct sigframe));
	}

	if (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}

	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = &fp->sf_sc;
	fp->sf_addr = (char *) regs[tERR];	/* error/address word from the trap frame */
	fp->sf_handler = catcher;

	/* save scratch registers; restored by sigreturn() below */
	fp->sf_eax = regs[tEAX];
	fp->sf_edx = regs[tEDX];
	fp->sf_ecx = regs[tECX];

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	fp->sf_sc.sc_onstack = oonstack;
	fp->sf_sc.sc_mask = mask;
	fp->sf_sc.sc_sp = regs[tESP];
	fp->sf_sc.sc_fp = regs[tEBP];
	fp->sf_sc.sc_pc = regs[tEIP];
	fp->sf_sc.sc_ps = regs[tEFLAGS];
	/* redirect the process into the signal trampoline with fp on top of stack */
	regs[tESP] = (int)fp;
	regs[tEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
}
452
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper priviledges or to cause
 * a machine fault.
 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;	/* user address of the sigcontext built by sendsig */
};
466
/*
 * Restore the register state saved by sendsig() and return to the
 * interrupted user context.  Returns EINVAL if the frame or context
 * is not accessible, EJUSTRETURN on success.
 */
int
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct sigframe *fp;
	register int *regs = p->p_regs;

	/*
	 * (XXX old comment) regs[tESP] points to the return address.
	 * The user scp pointer is above that.
	 * The return address is faked in the signal trampoline code
	 * for consistency.
	 */
	scp = uap->sigcntxp;
	/* recover the enclosing sigframe from the embedded sigcontext */
	fp = (struct sigframe *)
	     ((caddr_t)scp - offsetof(struct sigframe, sf_sc));

	if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0)
		return(EINVAL);

	/* restore scratch registers saved by sendsig() */
	regs[tEAX] = fp->sf_eax ;
	regs[tEDX] = fp->sf_edx ;
	regs[tECX] = fp->sf_ecx ;

	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
		return(EINVAL);
#ifdef notyet
	/* reject must-be-zero / must-be-one EFLAGS bit violations */
	if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) {
		return(EINVAL);
	}
#endif
        p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
	/* never allow SIGKILL, SIGCONT or SIGSTOP to be blocked */
	p->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[tEBP] = scp->sc_fp;
	regs[tESP] = scp->sc_sp;
	regs[tEIP] = scp->sc_pc;
	regs[tEFLAGS] = scp->sc_ps;
	/* EJUSTRETURN: the trap frame now carries the restored user state */
	return(EJUSTRETURN);
}
511
/*
 * Deliberately panic the system (and thus produce a crash dump) in a
 * predictable fashion -- a debugging aid invoked on request.
 */
void
diediedie()
{
	panic("because you said to!");
}
520
int	waittime = -1;		/* -1 until boot() has synced once; suppresses a second sync */
struct pcb dumppcb;		/* context saved by savectx() in boot() before dumping */
523
/*
 * Machine-dependent reboot/halt.  Unless RB_NOSYNC is set (or we have
 * been here before), sync and wait for dirty buffers to drain; take a
 * crash dump if RB_DUMP is set; then halt or reset the CPU.
 * "arghowto" is a mask of RB_* flags.
 */
void
boot(arghowto)
	int arghowto;
{
	register long dummy;		/* r12 is reserved */
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	extern int cold;
	int nomsg = 1;			/* print the "updating disks" banner only once */

	if (cold) {
		/* too early in boot to do anything useful -- just spin */
		printf("hit reset please");
		for(;;);
	}
	howto = arghowto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);
		sync((struct sigcontext *)0);
		/*
		 * Unmount filesystems
		 */
#if 0
		if (panicstr == 0)
			vfs_unmountall();
#endif

		/* wait, with increasing delay, for busy buffers to drain */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			if (nomsg) {
				printf("updating disks before rebooting... ");
				nomsg = 0;
			}
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		DELAY(10000);			/* wait for printf to finish */
	}
	splhigh();
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	} else {
		if (howto & RB_DUMP) {
			/* save this context so the dump reflects the caller */
			savectx(&dumppcb, 0);
			dumppcb.pcb_ptd = rcr3();
			dumpsys();

			if (PANIC_REBOOT_WAIT_TIME != 0) {
				if (PANIC_REBOOT_WAIT_TIME != -1) {
					int loop;
					printf("Automatic reboot in %d seconds - press a key on the console to abort\n",
						PANIC_REBOOT_WAIT_TIME);
					for (loop = PANIC_REBOOT_WAIT_TIME; loop > 0; --loop) {
						DELAY(1000 * 1000); /* one second */
						if (sgetc(1)) /* Did user type a key? */
							break;
					}
					/* countdown expired with no keypress: reboot now */
					if (!loop)
						goto die;
				}
				/* WAIT_TIME == -1: wait forever for a key */
			} else { /* zero time specified - reboot NOW */
				goto die;
			}
			printf("--> Press a key on the console to reboot <--\n");
			cngetc();
		}
	}
#ifdef lint
	dummy = 0; dummy = dummy;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
die:
	printf("Rebooting...\n");
	DELAY (100000);	/* wait 100ms for printf's to complete */
	cpu_reset();
	for(;;) ;
	/*NOTREACHED*/
}
624
unsigned long	dumpmag = 0x8fca0101UL;	/* magic number recognized by savecore */
int		dumpsize = 0;		/* pages dumped (set to physmem); also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
void
dumpsys()
{

	if (dumpdev == NODEV)
		return;
	/*
	 * NOTE(review): only minor()&07 == 1 is accepted as a dump
	 * target -- presumably the 'b' (swap) partition; verify against
	 * the partitioning conventions in use.
	 */
	if ((minor(dumpdev)&07) != 1)
		return;
	dumpsize = physmem;	/* dump all of physical memory */
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	/* hand off to the driver's dump entry point and report its verdict */
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	default:
		printf("succeeded\n");
		break;
	}
}
670
#ifdef HZ
/*
 * Return the time of day with microsecond resolution by adding one
 * tick's worth of microseconds to the last clock update.
 * If HZ is defined we use this code, otherwise the code in
 * /sys/i386/i386/microtime.s is used.  The other code only works
 * for HZ=100.
 */
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();	/* keep "time" from changing while we copy it */

	*tvp = time;
	tvp->tv_usec += tick;
	/*
	 * Normalize: tv_usec must stay in [0, 1000000).  The comparison
	 * must be ">=" -- with ">" a value of exactly 1000000 would be
	 * left in tv_usec, producing an invalid timeval.
	 */
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	splx(s);
}
#endif /* HZ */
691
692void
693physstrat(bp, strat, prio)
694	struct buf *bp;
695	int (*strat)(), prio;
696{
697	register int s;
698	caddr_t baddr;
699
700	vmapbuf(bp);
701	(*strat)(bp);
702	/* pageout daemon doesn't wait for pushed pages */
703	if (bp->b_flags & B_DIRTY)
704		return;
705	s = splbio();
706	while ((bp->b_flags & B_DONE) == 0)
707	  tsleep((caddr_t)bp, prio, "physstr", 0);
708	splx(s);
709	vunmapbuf(bp);
710}
711
/*
 * CPU-specific runtime setup hook, called from cpu_startup().
 * Nothing is required for the processors currently supported,
 * so this is an empty placeholder.
 */
static void
initcpu()
{
}
716
/*
 * Clear registers on exec: start the new image at "entry" with user
 * stack pointer "stack", load the user-mode segment selectors, and
 * discard any floating point state.
 */
void
setregs(p, entry, stack)
	struct proc *p;
	u_long entry;		/* program entry point (new pc) */
	u_long stack;		/* initial user stack pointer */
{
	p->p_regs[tEBP] = 0;	/* bottom of the fp chain */
	p->p_regs[tEIP] = entry;
	p->p_regs[tESP] = stack;
	p->p_regs[tSS] = _udatasel;
	p->p_regs[tDS] = _udatasel;
	p->p_regs[tES] = _udatasel;
	p->p_regs[tCS] = _ucodesel;

	p->p_addr->u_pcb.pcb_flags = 0;	/* no fp at all */
	load_cr0(rcr0() | CR0_TS);	/* start emulating */
#if	NNPX > 0
	npxinit(__INITIAL_NPXCW__);	/* reset coprocessor to default control word */
#endif	/* NNPX > 0 */
}
740
/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */
#define DESCRIPTOR_SIZE	8	/* bytes per x86 segment descriptor */

/* GDT slot assignments */
#define	GNULL_SEL	0	/* Null Descriptor */
#define	GCODE_SEL	1	/* Kernel Code Descriptor */
#define	GDATA_SEL	2	/* Kernel Data Descriptor */
#define	GLDT_SEL	3	/* LDT - eventually one per process */
#define	GTGATE_SEL	4	/* Process task switch gate */
#define	GPANIC_SEL	5	/* Task state to consider panic from */
#define	GPROC0_SEL	6	/* Task state process slot zero and up */
#define NGDT 	GPROC0_SEL+1	/* number of GDT entries */

unsigned char gdt[GPROC0_SEL+1][DESCRIPTOR_SIZE];	/* the global descriptor table */

/* interrupt descriptor table */
struct gate_descriptor idt[NIDT];

/* local descriptor table */
unsigned char ldt[5][DESCRIPTOR_SIZE];
#define	LSYS5CALLS_SEL	0	/* forced by intel BCS */
#define	LSYS5SIGR_SEL	1

#define	L43BSDCALLS_SEL	2	/* notyet */
#define	LUCODE_SEL	3	/* user code segment */
#define	LUDATA_SEL	4	/* user data segment */
/* separate stack, es,fs,gs sels ? */
/* #define	LPOSIXCALLS_SEL	5*/	/* notyet */

struct	i386tss	tss, panic_tss;	/* task state segments: normal and panic-time */

extern  struct user *proc0paddr;	/* u. area of process 0 */
778
/*
 * Software prototypes for the GDT -- in more palatable form.  Each is
 * converted to hardware descriptor format by ssdtosd() in init386();
 * the order must match the G*_SEL slot numbers above.
 */
struct soft_segment_descriptor gdt_segs[] = {
	/* Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* LDT Descriptor */
{	(int) ldt,			/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - Placeholder (task switch gate slot) */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Panic Tss Descriptor */
{	(int) &panic_tss,		/* segment base address  */
	sizeof(tss)-1,		/* length - the TSS */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Proc 0 Tss Descriptor */
{	(int) kstack,			/* segment base address  */
	sizeof(tss)-1,		/* length - the TSS */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ }};
844
/*
 * Software prototypes for the LDT: three placeholder slots (later
 * overwritten with call gates) plus user code and data segments.
 * Converted with ssdtosd() in init386(); order must match the
 * L*_SEL slot numbers above.
 */
struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ } };
891
892void
893setidt(idx, func, typ, dpl)
894	int idx;
895	void (*func)();
896	int typ;
897	int dpl;
898{
899	struct gate_descriptor *ip = idt + idx;
900
901	ip->gd_looffset = (int)func;
902	ip->gd_selector = 8;
903	ip->gd_stkcpy = 0;
904	ip->gd_xx = 0;
905	ip->gd_type = typ;
906	ip->gd_dpl = dpl;
907	ip->gd_p = 1;
908	ip->gd_hioffset = ((int)func)>>16 ;
909}
910
911#define	IDTVEC(name)	__CONCAT(X, name)
912typedef void idtvec_t();
913
914extern idtvec_t
915	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
916	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
917	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
918	IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
919	IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
920	IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
921	IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
922	IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(rsvd14), IDTVEC(syscall);
923
924int _gsel_tss;
925
926void
927init386(first)
928	int first;
929{
930	extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
931	int x, *pi;
932	unsigned biosbasemem, biosextmem;
933	struct gate_descriptor *gdp;
934	extern int sigcode,szsigcode;
935	/* table descriptors - used to load tables by microp */
936	unsigned short	r_gdt[3], r_idt[3];
937	int	pagesinbase, pagesinext;
938
939
940	proc0.p_addr = proc0paddr;
941
942	/*
943	 * Initialize the console before we print anything out.
944	 */
945
946	cninit ();
947
948	/*
949	 * make gdt memory segments, the code segment goes up to end of the
950	 * page with etext in it, the data segment goes to the end of
951	 * the address space
952	 */
953	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(i386_round_page(&etext)) - 1;
954	gdt_segs[GDATA_SEL].ssd_limit = 0xffffffffUL;	/* XXX constant? */
955	for (x=0; x < NGDT; x++) ssdtosd(gdt_segs+x, gdt+x);
956	/* make ldt memory segments */
957	/*
958	 * The data segment limit must not cover the user area because we
959	 * don't want the user area to be writable in copyout() etc. (page
960	 * level protection is lost in kernel mode on 386's).  Also, we
961	 * don't want the user area to be writable directly (page level
962	 * protection of the user area is not available on 486's with
963	 * CR0_WP set, because there is no user-read/kernel-write mode).
964	 *
965	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
966	 * should be spelled ...MAX_USER...
967	 */
968#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
969	/*
970	 * The code segment limit has to cover the user area until we move
971	 * the signal trampoline out of the user area.  This is safe because
972	 * the code segment cannot be written to directly.
973	 */
974#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * NBPG)
975	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
976	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
977	/* Note. eventually want private ldts per process */
978	for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);
979
980	/* exceptions */
981	setidt(0, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL);
982	setidt(1, &IDTVEC(dbg),  SDT_SYS386TGT, SEL_KPL);
983	setidt(2, &IDTVEC(nmi),  SDT_SYS386TGT, SEL_KPL);
984 	setidt(3, &IDTVEC(bpt),  SDT_SYS386TGT, SEL_UPL);
985	setidt(4, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_KPL);
986	setidt(5, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL);
987	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL);
988	setidt(7, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL);
989	setidt(8, &IDTVEC(dble),  SDT_SYS386TGT, SEL_KPL);
990	setidt(9, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL);
991	setidt(10, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL);
992	setidt(11, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL);
993	setidt(12, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL);
994	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL);
995	setidt(14, &IDTVEC(page),  SDT_SYS386TGT, SEL_KPL);
996	setidt(15, &IDTVEC(rsvd),  SDT_SYS386TGT, SEL_KPL);
997	setidt(16, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL);
998	setidt(17, &IDTVEC(rsvd0),  SDT_SYS386TGT, SEL_KPL);
999	setidt(18, &IDTVEC(rsvd1),  SDT_SYS386TGT, SEL_KPL);
1000	setidt(19, &IDTVEC(rsvd2),  SDT_SYS386TGT, SEL_KPL);
1001	setidt(20, &IDTVEC(rsvd3),  SDT_SYS386TGT, SEL_KPL);
1002	setidt(21, &IDTVEC(rsvd4),  SDT_SYS386TGT, SEL_KPL);
1003	setidt(22, &IDTVEC(rsvd5),  SDT_SYS386TGT, SEL_KPL);
1004	setidt(23, &IDTVEC(rsvd6),  SDT_SYS386TGT, SEL_KPL);
1005	setidt(24, &IDTVEC(rsvd7),  SDT_SYS386TGT, SEL_KPL);
1006	setidt(25, &IDTVEC(rsvd8),  SDT_SYS386TGT, SEL_KPL);
1007	setidt(26, &IDTVEC(rsvd9),  SDT_SYS386TGT, SEL_KPL);
1008	setidt(27, &IDTVEC(rsvd10),  SDT_SYS386TGT, SEL_KPL);
1009	setidt(28, &IDTVEC(rsvd11),  SDT_SYS386TGT, SEL_KPL);
1010	setidt(29, &IDTVEC(rsvd12),  SDT_SYS386TGT, SEL_KPL);
1011	setidt(30, &IDTVEC(rsvd13),  SDT_SYS386TGT, SEL_KPL);
1012	setidt(31, &IDTVEC(rsvd14),  SDT_SYS386TGT, SEL_KPL);
1013
1014#include	"isa.h"
1015#if	NISA >0
1016	isa_defaultirq();
1017#endif
1018
1019	r_gdt[0] = (unsigned short) (sizeof(gdt) - 1);
1020	r_gdt[1] = (unsigned short) ((int) gdt & 0xffff);
1021	r_gdt[2] = (unsigned short) ((int) gdt >> 16);
1022	lgdt(&r_gdt);
1023	r_idt[0] = (unsigned short) (sizeof(idt) - 1);
1024	r_idt[1] = (unsigned short) ((int) idt & 0xfffff);
1025	r_idt[2] = (unsigned short) ((int) idt >> 16);
1026	lidt(&r_idt);
1027	lldt(GSEL(GLDT_SEL, SEL_KPL));
1028
1029#include "ddb.h"
1030#if NDDB > 0
1031	kdb_init();
1032	if (boothowto & RB_KDB)
1033		Debugger("Boot flags requested debugger");
1034#endif
1035
1036	/* Use BIOS values stored in RTC CMOS RAM, since probing
1037	 * breaks certain 386 AT relics.
1038	 */
1039	biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
1040	biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
1041
1042	/*
1043	 * If BIOS tells us that it has more than 640k in the basemem,
1044	 *	don't believe it - set it to 640k.
1045	 */
1046	if (biosbasemem > 640)
1047		biosbasemem = 640;
1048
1049	/*
1050	 * Some 386 machines might give us a bogus number for extended
1051	 *	mem. If this happens, stop now.
1052	 */
1053#ifndef LARGEMEM
1054	if (biosextmem > 65536) {
1055		panic("extended memory beyond limit of 64MB");
1056		/* NOT REACHED */
1057	}
1058#endif
1059
1060	pagesinbase = biosbasemem * 1024 / NBPG;
1061	pagesinext = biosextmem * 1024 / NBPG;
1062
1063	/*
1064	 * Special hack for chipsets that still remap the 384k hole when
1065	 *	there's 16MB of memory - this really confuses people that
1066	 *	are trying to use bus mastering ISA controllers with the
1067	 *	"16MB limit"; they only have 16MB, but the remapping puts
1068	 *	them beyond the limit.
1069	 * XXX - this should be removed when bounce buffers are
1070	 *	implemented.
1071	 */
1072	/*
1073	 * If extended memory is between 15-16MB (16-17MB phys address range),
1074	 *	chop it to 15MB.
1075	 */
1076	if ((pagesinext > 3840) && (pagesinext < 4096))
1077		pagesinext = 3840;
1078
1079	/*
	 * Maxmem isn't the "maximum memory", it's the highest page
	 * of the physical address space. It should be "Maxphyspage".
1082	 */
1083	Maxmem = pagesinext + 0x100000/NBPG;
1084
1085#ifdef MAXMEM
1086	if (MAXMEM/4 < Maxmem)
1087		Maxmem = MAXMEM/4;
1088#endif
1089	maxmem = Maxmem - 1;	/* highest page of usable memory */
1090	physmem = maxmem;	/* number of pages of physmem addr space */
1091
1092	if (Maxmem < 2048/4) {
1093		panic("Too little memory (2MB required)");
1094		/* NOT REACHED */
1095	}
1096
1097	/* call pmap initialization to make new kernel address space */
1098	pmap_bootstrap (first, 0);
1099
1100	/*
1101	 * Initialize pointers to the two chunks of memory; for use
1102	 *	later in vm_page_startup.
1103	 */
1104	/* avail_start and avail_end are initialized in pmap_bootstrap */
1105	x = 0;
1106	if (pagesinbase > 1) {
1107		phys_avail[x++] = NBPG;		/* skip first page of memory */
1108		phys_avail[x++] = pagesinbase * NBPG;	/* memory up to the ISA hole */
1109	}
1110	phys_avail[x++] = avail_start;	/* memory up to the end */
1111	phys_avail[x++] = avail_end;
1112	phys_avail[x++] = 0;		/* no more chunks */
1113	phys_avail[x++] = 0;
1114
	/* now running on new page tables, configured, and u/iom is accessible */
1116
	/* make an initial tss so the CPU can get an interrupt stack on syscall! */
1118	proc0.p_addr->u_pcb.pcb_tss.tss_esp0 = (int) kstack + UPAGES*NBPG;
1119	proc0.p_addr->u_pcb.pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
1120	_gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1121
1122	((struct i386tss *)gdt_segs[GPROC0_SEL].ssd_base)->tss_ioopt =
1123		(sizeof(tss))<<16;
1124
1125	ltr(_gsel_tss);
1126
1127	/* make a call gate to reenter kernel with */
1128	gdp = (struct gate_descriptor *) &ldt[LSYS5CALLS_SEL][0];
1129
1130	x = (int) &IDTVEC(syscall);
1131	gdp->gd_looffset = x++;
1132	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
1133	gdp->gd_stkcpy = 1;
1134	gdp->gd_type = SDT_SYS386CGT;
1135	gdp->gd_dpl = SEL_UPL;
1136	gdp->gd_p = 1;
1137	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;
1138
1139	/* transfer to user mode */
1140
1141	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
1142	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);
1143
1144	/* setup proc 0's pcb */
1145	bcopy(&sigcode, proc0.p_addr->u_pcb.pcb_sigc, szsigcode);
1146	proc0.p_addr->u_pcb.pcb_flags = 0;
1147	proc0.p_addr->u_pcb.pcb_ptd = IdlePTD;
1148}
1149
1150extern struct pte	*CMAP1, *CMAP2;
1151extern caddr_t		CADDR1, CADDR2;
1152/*
1153 * zero out physical memory
1154 * specified in relocation units (NBPG bytes)
1155 */
1156void
1157clearseg(n)
1158	int n;
1159{
1160
1161	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
1162	load_cr3(rcr3());
1163	bzero(CADDR2,NBPG);
1164	*(int *) CADDR2 = 0;
1165}
1166
1167/*
1168 * copy a page of physical memory
1169 * specified in relocation units (NBPG bytes)
1170 */
1171void
1172copyseg(frm, n)
1173	int frm;
1174	int n;
1175{
1176
1177	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
1178	load_cr3(rcr3());
1179	bcopy((void *)frm, (void *)CADDR2, NBPG);
1180}
1181
1182/*
1183 * copy a page of physical memory
1184 * specified in relocation units (NBPG bytes)
1185 */
1186void
1187physcopyseg(frm, to)
1188	int frm;
1189	int to;
1190{
1191
1192	*(int *)CMAP1 = PG_V | PG_KW | ctob(frm);
1193	*(int *)CMAP2 = PG_V | PG_KW | ctob(to);
1194	load_cr3(rcr3());
1195	bcopy(CADDR1, CADDR2, NBPG);
1196}
1197
1198/*aston() {
1199	schednetisr(NETISR_AST);
1200}*/
1201
1202void
1203setsoftclock() {
1204	schednetisr(NETISR_SCLK);
1205}
1206
1207/*
1208 * insert an element into a queue
1209 */
1210#undef insque
1211void				/* XXX replace with inline FIXME! */
1212_insque(element, head)
1213	register struct prochd *element, *head;
1214{
1215	element->ph_link = head->ph_link;
1216	head->ph_link = (struct proc *)element;
1217	element->ph_rlink = (struct proc *)head;
1218	((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
1219}
1220
1221/*
1222 * remove an element from a queue
1223 */
1224#undef remque
1225void				/* XXX replace with inline FIXME! */
1226_remque(element)
1227	register struct prochd *element;
1228{
1229	((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
1230	((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
1231	element->ph_rlink = (struct proc *)0;
1232}
1233
1234/*
1235 * The registers are in the frame; the frame is in the user area of
1236 * the process in question; when the process is active, the registers
1237 * are in "the kernel stack"; when it's not, they're still there, but
1238 * things get flipped around.  So, since p->p_regs is the whole address
1239 * of the register set, take its offset from the kernel stack, and
1240 * index into the user block.  Don't you just *love* virtual memory?
1241 * (I'm starting to think seymour is right...)
1242 */
1243
1244int
1245ptrace_set_pc (struct proc *p, unsigned int addr) {
1246	void *regs = (char*)p->p_addr +
1247		((char*) p->p_regs - (char*) kstack);
1248
1249	((struct trapframe *)regs)->tf_eip = addr;
1250	return 0;
1251}
1252
1253int
1254ptrace_single_step (struct proc *p) {
1255	void *regs = (char*)p->p_addr +
1256		((char*) p->p_regs - (char*) kstack);
1257
1258	((struct trapframe *)regs)->tf_eflags |= PSL_T;
1259	return 0;
1260}
1261
1262/*
1263 * Copy the registers to user-space.
1264 */
1265
1266int
1267ptrace_getregs (struct proc *p, unsigned int *addr) {
1268	int error;
1269	struct regs regs = {0};
1270
1271	if (error = fill_regs (p, &regs))
1272		return error;
1273
1274	return copyout (&regs, addr, sizeof (regs));
1275}
1276
1277int
1278ptrace_setregs (struct proc *p, unsigned int *addr) {
1279	int error;
1280	struct regs regs = {0};
1281
1282	if (error = copyin (addr, &regs, sizeof(regs)))
1283		return error;
1284
1285	return set_regs (p, &regs);
1286}
1287
1288int
1289fill_regs(struct proc *p, struct regs *regs) {
1290	int error;
1291	struct trapframe *tp;
1292	void *ptr = (char*)p->p_addr +
1293		((char*) p->p_regs - (char*) kstack);
1294
1295	tp = ptr;
1296	regs->r_es = tp->tf_es;
1297	regs->r_ds = tp->tf_ds;
1298	regs->r_edi = tp->tf_edi;
1299	regs->r_esi = tp->tf_esi;
1300	regs->r_ebp = tp->tf_ebp;
1301	regs->r_ebx = tp->tf_ebx;
1302	regs->r_edx = tp->tf_edx;
1303	regs->r_ecx = tp->tf_ecx;
1304	regs->r_eax = tp->tf_eax;
1305	regs->r_eip = tp->tf_eip;
1306	regs->r_cs = tp->tf_cs;
1307	regs->r_eflags = tp->tf_eflags;
1308	regs->r_esp = tp->tf_esp;
1309	regs->r_ss = tp->tf_ss;
1310	return 0;
1311}
1312
1313int
1314set_regs (struct proc *p, struct regs *regs) {
1315	int error;
1316	struct trapframe *tp;
1317	void *ptr = (char*)p->p_addr +
1318		((char*) p->p_regs - (char*) kstack);
1319
1320	tp = ptr;
1321	tp->tf_es = regs->r_es;
1322	tp->tf_ds = regs->r_ds;
1323	tp->tf_edi = regs->r_edi;
1324	tp->tf_esi = regs->r_esi;
1325	tp->tf_ebp = regs->r_ebp;
1326	tp->tf_ebx = regs->r_ebx;
1327	tp->tf_edx = regs->r_edx;
1328	tp->tf_ecx = regs->r_ecx;
1329	tp->tf_eax = regs->r_eax;
1330	tp->tf_eip = regs->r_eip;
1331	tp->tf_cs = regs->r_cs;
1332	tp->tf_eflags = regs->r_eflags;
1333	tp->tf_esp = regs->r_esp;
1334	tp->tf_ss = regs->r_ss;
1335	return 0;
1336}
1337
1338#include "ddb.h"
1339#if NDDB <= 0
1340void
1341Debugger(const char *msg)
1342{
1343	printf("Debugger(\"%s\") called.", msg);
1344}
1345#endif /* no DDB */
1346