machdep.c revision 682
1/*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by the University of
20 *	California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
38 *	$Id: machdep.c,v 1.12 1993/10/15 10:34:22 rgrimes Exp $
39 */
40
41#include "npx.h"
42#include "isa.h"
43
44#include <stddef.h>
45#include "param.h"
46#include "systm.h"
47#include "signalvar.h"
48#include "kernel.h"
49#include "map.h"
50#include "proc.h"
51#include "user.h"
52#include "exec.h"            /* for PS_STRINGS */
53#include "buf.h"
54#include "reboot.h"
55#include "conf.h"
56#include "file.h"
57#include "callout.h"
58#include "malloc.h"
59#include "mbuf.h"
60#include "msgbuf.h"
61#include "net/netisr.h"
62
63#ifdef SYSVSHM
64#include "sys/shm.h"
65#endif
66
67#include "vm/vm.h"
68#include "vm/vm_kern.h"
69#include "vm/vm_page.h"
70
71#include "sys/exec.h"
72#include "sys/vnode.h"
73
74#ifndef MACHINE_NONCONTIG
75extern vm_offset_t avail_end;
76#else
77extern vm_offset_t avail_start, avail_end;
78static vm_offset_t hole_start, hole_end;
79static vm_offset_t avail_next;
80static unsigned int avail_remaining;
81#endif /* MACHINE_NONCONTIG */
82
83#include "machine/cpu.h"
84#include "machine/reg.h"
85#include "machine/psl.h"
86#include "machine/specialreg.h"
87#include "machine/sysarch.h"
88
89#include "i386/isa/isa.h"
90#include "i386/isa/rtc.h"
91
92
93#define	EXPECT_BASEMEM	640	/* The expected base memory, in KB */
94#define	INFORM_WAIT	1	/* Set to pause before crash in weird cases */
95
96/*
97 * Declare these as initialized data so we can patch them.
98 */
99int	nswbuf = 0;
100#ifdef	NBUF
101int	nbuf = NBUF;
102#else
103int	nbuf = 0;
104#endif
105#ifdef	BUFPAGES
106int	bufpages = BUFPAGES;
107#else
108int	bufpages = 0;
109#endif
110extern int freebufspace;
111
112int _udatasel, _ucodesel;
113
114/*
115 * Machine-dependent startup code
116 */
117int boothowto = 0, Maxmem = 0;
118long dumplo;
119int physmem, maxmem;
120extern int bootdev;
121#ifdef SMALL
122extern int forcemaxmem;
123#endif
124int biosmem;
125
126extern cyloffset;
127
128int cpu_class;
129
130void dumpsys __P((void));
131
132void
133cpu_startup()
134{
135	register int unixsize;
136	register unsigned i;
137	register struct pte *pte;
138	int mapaddr, j;
139	register caddr_t v;
140	int maxbufs, base, residual;
141	extern long Usrptsize;
142	vm_offset_t minaddr, maxaddr;
143	vm_size_t size;
144	int firstaddr;
145
146	/*
147	 * Initialize error message buffer (at end of core).
148	 */
149
150	/* avail_end was pre-decremented in pmap_bootstrap to compensate */
151	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
152#ifndef MACHINE_NONCONTIG
153		pmap_enter(pmap_kernel(), (caddr_t)msgbufp + i * NBPG,
154			   avail_end + i * NBPG, VM_PROT_ALL, TRUE);
155#else
156		pmap_enter(pmap_kernel(), (caddr_t)msgbufp + i * NBPG,
157			   avail_end + i * NBPG, VM_PROT_ALL, TRUE);
158#endif
159	msgbufmapped = 1;
160
161	/*
162	 * Good {morning,afternoon,evening,night}.
163	 */
164	printf(version);
165	identifycpu();
166	printf("real mem  = %d\n", ctob(physmem));
167
168	/*
169	 * Allocate space for system data structures.
170	 * The first available kernel virtual address is in "v".
171	 * As pages of kernel virtual memory are allocated, "v" is incremented.
172	 * As pages of memory are allocated and cleared,
173	 * "firstaddr" is incremented.
174	 * An index into the kernel page table corresponding to the
175	 * virtual memory address maintained in "v" is kept in "mapaddr".
176	 */
177
178	/*
179	 * Make two passes.  The first pass calculates how much memory is
180	 * needed and allocates it.  The second pass assigns virtual
181	 * addresses to the various data structures.
182	 */
183	firstaddr = 0;
184again:
185	v = (caddr_t)firstaddr;
186
187#define	valloc(name, type, num) \
188	    (name) = (type *)v; v = (caddr_t)((name)+(num))
189#define	valloclim(name, type, num, lim) \
190	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
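/*
 * valloc() carves `num' objects of `type' out of the region starting at `v'
 * simply by advancing `v'; nothing is touched until the space really exists.
 * On the first pass firstaddr is 0, so after the vallocs below the quantity
 * (v - firstaddr) is the total size needed; kmem_alloc() then provides that
 * much kernel VM, and the second pass repeats the same vallocs to hand out
 * the real addresses.
 */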
191/*	valloc(cfree, struct cblock, nclist);  no clists any more!!! - cgd */
192	valloc(callout, struct callout, ncallout);
193#ifdef NetBSD
194	valloc(swapmap, struct map, nswapmap = maxproc * 2);
195#endif
196#ifdef SYSVSHM
197	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
198#endif
199	/*
200	 * Determine how many buffers to allocate.
201	 * Use 20% of the memory beyond the first 3MB.
202	 * Ensure a minimum of 16 fs buffers.
203	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
204	 */
205	if (bufpages == 0)
206		bufpages = ((physmem << PAGE_SHIFT) - 3072*1024) / NBPG / 5;
207	if (bufpages < 32)
208		bufpages = 32;
209
210	/*
211	 * We must still limit the maximum number of buffers to be no
212	 * more than 2/5 of the size of the kernel malloc region; this
213	 * will only take effect on machines with lots of memory.
214	 */
215	bufpages = min(bufpages, (VM_KMEM_SIZE / NBPG) * 2 / 5);
216	if (nbuf == 0) {
217		nbuf = bufpages / 2;
218		if (nbuf < 16)
219			nbuf = 16;
220	}
221	freebufspace = bufpages * NBPG;
222	if (nswbuf == 0) {
223		nswbuf = (nbuf / 2) &~ 1;	/* force even */
224		if (nswbuf > 256)
225			nswbuf = 256;		/* sanity */
226	}
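	/*
	 * For illustration (assuming NBPG == 4096 and PAGE_SHIFT == 12): an
	 * 8MB machine gives (8MB - 3MB) / 4K / 5 = 256 buffer pages before
	 * the VM_KMEM_SIZE clamp, hence nbuf = 128 and nswbuf = 64.
	 */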
227	valloc(swbuf, struct buf, nswbuf);
228	valloc(buf, struct buf, nbuf);
229
230	/*
231	 * End of first pass; the size has been calculated, so allocate memory
232	 */
233	if (firstaddr == 0) {
234		size = (vm_size_t)(v - firstaddr);
235		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
236		if (firstaddr == 0)
237			panic("startup: no room for tables");
238		goto again;
239	}
240	/*
241	 * End of second pass; addresses have been assigned
242	 */
243	if ((vm_size_t)(v - firstaddr) != size)
244		panic("startup: table size inconsistency");
245
246	/*
247	 * Allocate a submap for buffer space allocations.
248	 * XXX we are NOT using buffer_map, but due to
249	 * the references to it we will just allocate 1 page of
250	 * vm (not real memory) to make things happy...
251	 */
252	buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
253				/* bufpages * */NBPG, TRUE);
254	/*
255	 * Allocate a submap for exec arguments.  This map effectively
256	 * limits the number of processes exec'ing at any time.
257	 */
258/*	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
259 *				16*NCARGS, TRUE);
260 *	NOT CURRENTLY USED -- cgd
261 */
262	/*
263	 * Allocate a submap for physio
264	 */
265	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
266				 VM_PHYS_SIZE, TRUE);
267
268	/*
269	 * Finally, allocate the mbuf pool.  Since mclrefcnt is an off-size,
270	 * we use the more space-efficient malloc in place of kmem_alloc.
271	 */
272	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
273				   M_MBUF, M_NOWAIT);
274	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
275	mb_map = kmem_suballoc(kernel_map, (vm_offset_t)&mbutl, &maxaddr,
276			       VM_MBUF_SIZE, FALSE);
277	/*
278	 * Initialize callouts
279	 */
280	callfree = callout;
281	for (i = 1; i < ncallout; i++)
282		callout[i-1].c_next = &callout[i];
283
284	printf("avail mem = %d\n", ptoa(vm_page_free_count));
285	printf("using %d buffers containing %d bytes of memory\n",
286		nbuf, bufpages * CLBYTES);
287
288	/*
289	 * Set up CPU-specific registers, cache, etc.
290	 */
291	initcpu();
292
293	/*
294	 * Set up buffers, so they can be used to read disk labels.
295	 */
296	bufinit();
297
298	/*
299	 * Configure the system.
300	 */
301	configure();
302}
303
304
305struct cpu_nameclass i386_cpus[] = {
306	{ "Intel 80286",	CPUCLASS_286 },		/* CPU_286   */
307	{ "i386SX",		CPUCLASS_386 },		/* CPU_386SX */
308	{ "i386DX",		CPUCLASS_386 },		/* CPU_386   */
309	{ "i486SX",		CPUCLASS_486 },		/* CPU_486SX */
310	{ "i486DX",		CPUCLASS_486 },		/* CPU_486   */
311	{ "i586",		CPUCLASS_586 },		/* CPU_586   */
312};
313
314identifycpu()	/* translated from hp300 -- cgd */
315{
316	printf("CPU: ");
317	if (cpu >= 0 && cpu < (sizeof i386_cpus/sizeof(struct cpu_nameclass))) {
318		printf("%s", i386_cpus[cpu].cpu_name);
319		cpu_class = i386_cpus[cpu].cpu_class;
320	} else {
321		printf("unknown cpu type %d\n", cpu);
322		panic("startup: bad cpu id");
323	}
324	printf(" (");
325	switch(cpu_class) {
326	case CPUCLASS_286:
327		printf("286");
328		break;
329	case CPUCLASS_386:
330		printf("386");
331		break;
332	case CPUCLASS_486:
333		printf("486");
334		break;
335	case CPUCLASS_586:
336		printf("586");
337		break;
338	default:
339		printf("unknown");	/* will panic below... */
340	}
341	printf("-class CPU)");
342	printf("\n");	/* cpu speed would be nice, but how? */
343
344	/*
345	 * Now that we have told the user what they have,
346	 * let them know if that machine type isn't configured.
347	 */
348	switch (cpu_class) {
349	case CPUCLASS_286:	/* a 286 should not make it this far, anyway */
350#if !defined(I386_CPU) && !defined(I486_CPU) && !defined(I586_CPU)
351#error This kernel is not configured for one of the supported CPUs
352#endif
353#if !defined(I386_CPU)
354	case CPUCLASS_386:
355#endif
356#if !defined(I486_CPU)
357	case CPUCLASS_486:
358#endif
359#if !defined(I586_CPU)
360	case CPUCLASS_586:
361#endif
362		panic("CPU class not configured");
363	default:
364		break;
365	}
366}
367
368#ifdef PGINPROF
369/*
370 * Return the difference (in microseconds)
371 * between the  current time and a previous
372 * time as represented  by the arguments.
373 * If there is a pending clock interrupt
374 * which has not been serviced due to high
375 * ipl, return error code.
376 */
377/*ARGSUSED*/
378vmtime(otime, olbolt, oicr)
379	register int otime, olbolt, oicr;
380{
381
382	return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
383}
384#endif
385
386extern int kstack[];
387
388/*
389 * Send an interrupt to a process.
390 *
391 * The stack is set up to allow the sigcode stored
392 * in the u. area to call the handler, followed by a kernel call
393 * to the sigreturn routine below.  After sigreturn
394 * resets the signal mask, the stack, and the
395 * frame pointer, it returns to the user-
396 * specified pc and psl.
397 */
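/*
 * The sigframe built below gives the trampoline everything it needs:
 * the handler arguments (sf_signum, sf_code, sf_scp), the handler
 * address itself (sf_handler), the scratch registers to restore
 * (sf_eax/sf_edx/sf_ecx), and a sigcontext (sf_sc) that sigreturn()
 * later uses to restore the interrupted state.  The user ESP is
 * pointed at this frame and EIP at the trampoline (pcb_sigc) before
 * returning to user mode.
 */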
398void
399sendsig(catcher, sig, mask, code)
400	sig_t catcher;
401	int sig, mask;
402	unsigned code;
403{
404	register struct proc *p = curproc;
405	register int *regs;
406	register struct sigframe *fp;
407	struct sigacts *ps = p->p_sigacts;
408	int oonstack, frmtrap;
409
410	regs = p->p_regs;
411        oonstack = ps->ps_onstack;
412	frmtrap = curpcb->pcb_flags & FM_TRAP;
413	/*
414	 * Allocate and validate space for the signal handler
415	 * context. Note that if the stack is in P0 space, the
416	 * call to grow() is a nop, and the useracc() check
417	 * will fail if the process has not already allocated
418	 * the space with a `brk'.
419	 */
420        if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
421		fp = (struct sigframe *)(ps->ps_sigsp
422				- sizeof(struct sigframe));
423                ps->ps_onstack = 1;
424	} else {
425		if (frmtrap)
426			fp = (struct sigframe *)(regs[tESP]
427				- sizeof(struct sigframe));
428		else
429			fp = (struct sigframe *)(regs[sESP]
430				- sizeof(struct sigframe));
431	}
432
433	if ((unsigned)fp <= (unsigned)p->p_vmspace->vm_maxsaddr + MAXSSIZ - ctob(p->p_vmspace->vm_ssize))
434		(void)grow(p, (unsigned)fp);
435
436	if (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
437		/*
438		 * Process has trashed its stack; give it an illegal
439		 * instruction to halt it in its tracks.
440		 */
441		SIGACTION(p, SIGILL) = SIG_DFL;
442		sig = sigmask(SIGILL);
443		p->p_sigignore &= ~sig;
444		p->p_sigcatch &= ~sig;
445		p->p_sigmask &= ~sig;
446		psignal(p, SIGILL);
447		return;
448	}
449
450	/*
451	 * Build the argument list for the signal handler.
452	 */
453	fp->sf_signum = sig;
454	fp->sf_code = code;
455	fp->sf_scp = &fp->sf_sc;
456	fp->sf_handler = catcher;
457
458	/* save scratch registers */
459	if(frmtrap) {
460		fp->sf_eax = regs[tEAX];
461		fp->sf_edx = regs[tEDX];
462		fp->sf_ecx = regs[tECX];
463	} else {
464		fp->sf_eax = regs[sEAX];
465		fp->sf_edx = regs[sEDX];
466		fp->sf_ecx = regs[sECX];
467	}
468	/*
469	 * Build the signal context to be used by sigreturn.
470	 */
471	fp->sf_sc.sc_onstack = oonstack;
472	fp->sf_sc.sc_mask = mask;
473	if(frmtrap) {
474		fp->sf_sc.sc_sp = regs[tESP];
475		fp->sf_sc.sc_fp = regs[tEBP];
476		fp->sf_sc.sc_pc = regs[tEIP];
477		fp->sf_sc.sc_ps = regs[tEFLAGS];
478		regs[tESP] = (int)fp;
479		regs[tEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
480	} else {
481		fp->sf_sc.sc_sp = regs[sESP];
482		fp->sf_sc.sc_fp = regs[sEBP];
483		fp->sf_sc.sc_pc = regs[sEIP];
484		fp->sf_sc.sc_ps = regs[sEFLAGS];
485		regs[sESP] = (int)fp;
486		regs[sEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
487	}
488}
489
490/*
491 * System call to cleanup state after a signal
492 * has been taken.  Reset signal mask and
493 * stack state from context left by sendsig (above).
494 * Return to previous pc and psl as specified by
495 * context left by sendsig. Check carefully to
496 * make sure that the user has not modified the
497 * psl to gain improper privileges or to cause
498 * a machine fault.
499 */
500struct sigreturn_args {
501	struct sigcontext *sigcntxp;
502};
503
504sigreturn(p, uap, retval)
505	struct proc *p;
506	struct sigreturn_args *uap;
507	int *retval;
508{
509	register struct sigcontext *scp;
510	register struct sigframe *fp;
511	register int *regs = p->p_regs;
512
513	/*
514	 * (XXX old comment) regs[sESP] points to the return address.
515	 * The user scp pointer is above that.
516	 * The return address is faked in the signal trampoline code
517	 * for consistency.
518	 */
519	scp = uap->sigcntxp;
520	fp = (struct sigframe *)
521	     ((caddr_t)scp - offsetof(struct sigframe, sf_sc));
522
523	if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0)
524		return(EINVAL);
525
526	/* restore scratch registers */
527	regs[sEAX] = fp->sf_eax ;
528	regs[sEDX] = fp->sf_edx ;
529	regs[sECX] = fp->sf_ecx ;
530
531	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
532		return(EINVAL);
533#ifdef notyet
534	if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) {
535		return(EINVAL);
536	}
537#endif
538        p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
539	p->p_sigmask = scp->sc_mask &~
540	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
541	regs[sEBP] = scp->sc_fp;
542	regs[sESP] = scp->sc_sp;
543	regs[sEIP] = scp->sc_pc;
544	regs[sEFLAGS] = scp->sc_ps;
545	return(EJUSTRETURN);
546}
547
548/*
549 * a simple function to make the system panic (and dump a vmcore)
550 * in a predictable fashion
551 */
552void diediedie()
553{
554	panic("because you said to!");
555}
556
557int	waittime = -1;
558struct pcb dumppcb;
559
560void
561boot(arghowto)
562	int arghowto;
563{
564	register long dummy;		/* r12 is reserved */
565	register int howto;		/* r11 == how to boot */
566	register int devtype;		/* r10 == major of root dev */
567	extern int cold;
568	int nomsg = 1;
569
570	if(cold) {
571		printf("hit reset please");
572		for(;;);
573	}
574	howto = arghowto;
575	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
576		register struct buf *bp;
577		int iter, nbusy;
578
579		waittime = 0;
580		(void) splnet();
581		printf("syncing disks... ");
582		/*
583		 * Release inodes held by texts before update.
584		 */
585		if (panicstr == 0)
586			vnode_pager_umount(NULL);
587		sync((struct sigcontext *)0);
588		/*
589		 * Unmount filesystems
590		 */
591#if 0
592		if (panicstr == 0)
593			vfs_unmountall();
594#endif
595
596		for (iter = 0; iter < 20; iter++) {
597			nbusy = 0;
598			for (bp = &buf[nbuf]; --bp >= buf; )
599				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
600					nbusy++;
601			if (nbusy == 0)
602				break;
603			if (nomsg) {
604				printf("updating disks before rebooting... ");
605				nomsg = 0;
606			}
607			printf("%d ", nbusy);
608			DELAY(40000 * iter);
609		}
610		if (nbusy)
611			printf("giving up\n");
612		else
613			printf("done\n");
614		DELAY(10000);			/* wait for printf to finish */
615	}
616	splhigh();
617	devtype = major(rootdev);
618	if (howto&RB_HALT) {
619		printf("\n");
620		printf("The operating system has halted.\n");
621		printf("Please press any key to reboot.\n\n");
622		cngetc();
623	} else {
624		if (howto & RB_DUMP) {
625			savectx(&dumppcb, 0);
626			dumppcb.pcb_ptd = rcr3();
627			dumpsys();
628			/*NOTREACHED*/
629		}
630	}
631#ifdef lint
632	dummy = 0; dummy = dummy;
633	printf("howto %d, devtype %d\n", arghowto, devtype);
634#endif
635	cpu_reset();
636	for(;;) ;
637	/*NOTREACHED*/
638}
639
640unsigned	dumpmag = 0x8fca0101;	/* magic number for savecore */
641int		dumpsize = 0;		/* also for savecore */
642/*
643 * Doadump comes here after turning off memory management and
644 * getting on the dump stack, either when called above, or by
645 * the auto-restart code.
646 */
647void
648dumpsys()
649{
650
651	if (dumpdev == NODEV)
652		return;
653	if ((minor(dumpdev)&07) != 1)
654		return;
655	dumpsize = physmem;
656	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
657	printf("dump ");
658	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
659
660	case ENXIO:
661		printf("device bad\n");
662		break;
663
664	case EFAULT:
665		printf("device not ready\n");
666		break;
667
668	case EINVAL:
669		printf("area improper\n");
670		break;
671
672	case EIO:
673		printf("i/o error\n");
674		break;
675
676	case EINTR:
677		printf("aborted from console\n");
678		break;
679
680	default:
681		printf("succeeded\n");
682		break;
683	}
684	printf("\n\n");
685	DELAY(1000);
686}
687
688#ifdef HZ
689/*
690 * If HZ is defined, we use this code; otherwise the code in
691 * /sys/i386/i386/microtime.s is used.  The other code only works
692 * for HZ=100.
693 */
694microtime(tvp)
695	register struct timeval *tvp;
696{
697	int s = splhigh();
698
699	*tvp = time;
700	tvp->tv_usec += tick;
701	while (tvp->tv_usec >= 1000000) {	/* normalize so tv_usec < 1000000 */
702		tvp->tv_sec++;
703		tvp->tv_usec -= 1000000;
704	}
705	splx(s);
706}
707#endif /* HZ */
708
709physstrat(bp, strat, prio)
710	struct buf *bp;
711	int (*strat)(), prio;
712{
713	register int s;
714	caddr_t baddr;
715
716	/*
717	 * vmapbuf clobbers b_addr so we must remember it so that it
718	 * can be restored after vunmapbuf.  This is truly rude; we
719	 * should really be storing this in a field in the buf struct
720	 * but none are available and I didn't want to add one at
721	 * this time.  Note that b_addr for dirty page pushes is
722	 * restored in vunmapbuf. (ugh!)
723	 */
724	baddr = bp->b_un.b_addr;
725	vmapbuf(bp);
726	(*strat)(bp);
727	/* pageout daemon doesn't wait for pushed pages */
728	if (bp->b_flags & B_DIRTY)
729		return;
730	s = splbio();
731	while ((bp->b_flags & B_DONE) == 0)
732		sleep((caddr_t)bp, prio);
733	splx(s);
734	vunmapbuf(bp);
735	bp->b_un.b_addr = baddr;
736}
737
738initcpu()
739{
740}
741
742/*
743 * Clear registers on exec
744 */
745void
746setregs(p, entry)
747	struct proc *p;
748	u_long entry;
749{
750
751	p->p_regs[sEBP] = 0;	/* bottom of the fp chain */
752	p->p_regs[sEIP] = entry;
753
754	p->p_addr->u_pcb.pcb_flags = 0;	/* no fp at all */
755	load_cr0(rcr0() | CR0_TS);	/* start emulating */
756#if	NNPX > 0
757	npxinit(__INITIAL_NPXCW__);
758#endif	/* NNPX > 0 */
759}
760
761/*
762 * Initialize 386 and configure to run kernel
763 */
764
765/*
766 * Initialize segments & interrupt table
767 */
768#define DESCRIPTOR_SIZE	8
769
770#define	GNULL_SEL	0	/* Null Descriptor */
771#define	GCODE_SEL	1	/* Kernel Code Descriptor */
772#define	GDATA_SEL	2	/* Kernel Data Descriptor */
773#define	GLDT_SEL	3	/* LDT - eventually one per process */
774#define	GTGATE_SEL	4	/* Process task switch gate */
775#define	GPANIC_SEL	5	/* Task state to consider panic from */
776#define	GPROC0_SEL	6	/* Task state process slot zero and up */
777#define NGDT 	GPROC0_SEL+1
778
779unsigned char gdt[GPROC0_SEL+1][DESCRIPTOR_SIZE];
780
781/* interrupt descriptor table */
782struct gate_descriptor idt[NIDT];
783
784/* local descriptor table */
785unsigned char ldt[5][DESCRIPTOR_SIZE];
786#define	LSYS5CALLS_SEL	0	/* forced by intel BCS */
787#define	LSYS5SIGR_SEL	1
788
789#define	L43BSDCALLS_SEL	2	/* notyet */
790#define	LUCODE_SEL	3
791#define	LUDATA_SEL	4
792/* separate stack, es, fs, gs sels ? */
793/* #define	LPOSIXCALLS_SEL	5 */	/* notyet */
794
795struct	i386tss	tss, panic_tss;
796
797extern  struct user *proc0paddr;
798
799/* software prototypes -- in more palatable form */
800struct soft_segment_descriptor gdt_segs[] = {
801	/* Null Descriptor */
802{	0x0,			/* segment base address  */
803	0x0,			/* length */
804	0,			/* segment type */
805	0,			/* segment descriptor priority level */
806	0,			/* segment descriptor present */
807	0, 0,
808	0,			/* default 32 vs 16 bit size */
809	0  			/* limit granularity (byte/page units)*/ },
810	/* Code Descriptor for kernel */
811{	0x0,			/* segment base address  */
812	0xfffff,		/* length - all address space */
813	SDT_MEMERA,		/* segment type */
814	0,			/* segment descriptor priority level */
815	1,			/* segment descriptor present */
816	0, 0,
817	1,			/* default 32 vs 16 bit size */
818	1  			/* limit granularity (byte/page units)*/ },
819	/* Data Descriptor for kernel */
820{	0x0,			/* segment base address  */
821	0xfffff,		/* length - all address space */
822	SDT_MEMRWA,		/* segment type */
823	0,			/* segment descriptor priority level */
824	1,			/* segment descriptor present */
825	0, 0,
826	1,			/* default 32 vs 16 bit size */
827	1  			/* limit granularity (byte/page units)*/ },
828	/* LDT Descriptor */
829{	(int) ldt,			/* segment base address  */
830	sizeof(ldt)-1,		/* length - all address space */
831	SDT_SYSLDT,		/* segment type */
832	0,			/* segment descriptor priority level */
833	1,			/* segment descriptor present */
834	0, 0,
835	0,			/* unused - default 32 vs 16 bit size */
836	0  			/* limit granularity (byte/page units)*/ },
837	/* Null Descriptor - Placeholder */
838{	0x0,			/* segment base address  */
839	0x0,			/* length - all address space */
840	0,			/* segment type */
841	0,			/* segment descriptor priority level */
842	0,			/* segment descriptor present */
843	0, 0,
844	0,			/* default 32 vs 16 bit size */
845	0  			/* limit granularity (byte/page units)*/ },
846	/* Panic Tss Descriptor */
847{	(int) &panic_tss,		/* segment base address  */
848	sizeof(tss)-1,		/* length - all address space */
849	SDT_SYS386TSS,		/* segment type */
850	0,			/* segment descriptor priority level */
851	1,			/* segment descriptor present */
852	0, 0,
853	0,			/* unused - default 32 vs 16 bit size */
854	0  			/* limit granularity (byte/page units)*/ },
855	/* Proc 0 Tss Descriptor */
856{	(int) kstack,			/* segment base address  */
857	sizeof(tss)-1,		/* length - all address space */
858	SDT_SYS386TSS,		/* segment type */
859	0,			/* segment descriptor priority level */
860	1,			/* segment descriptor present */
861	0, 0,
862	0,			/* unused - default 32 vs 16 bit size */
863	0  			/* limit granularity (byte/page units)*/ }};
864
865struct soft_segment_descriptor ldt_segs[] = {
866	/* Null Descriptor - overwritten by call gate */
867{	0x0,			/* segment base address  */
868	0x0,			/* length - all address space */
869	0,			/* segment type */
870	0,			/* segment descriptor priority level */
871	0,			/* segment descriptor present */
872	0, 0,
873	0,			/* default 32 vs 16 bit size */
874	0  			/* limit granularity (byte/page units)*/ },
875	/* Null Descriptor - overwritten by call gate */
876{	0x0,			/* segment base address  */
877	0x0,			/* length - all address space */
878	0,			/* segment type */
879	0,			/* segment descriptor priority level */
880	0,			/* segment descriptor present */
881	0, 0,
882	0,			/* default 32 vs 16 bit size */
883	0  			/* limit granularity (byte/page units)*/ },
884	/* Null Descriptor - overwritten by call gate */
885{	0x0,			/* segment base address  */
886	0x0,			/* length - all address space */
887	0,			/* segment type */
888	0,			/* segment descriptor priority level */
889	0,			/* segment descriptor present */
890	0, 0,
891	0,			/* default 32 vs 16 bit size */
892	0  			/* limit granularity (byte/page units)*/ },
893	/* Code Descriptor for user */
894{	0x0,			/* segment base address  */
895	0xfffff,		/* length - all address space */
896	SDT_MEMERA,		/* segment type */
897	SEL_UPL,		/* segment descriptor priority level */
898	1,			/* segment descriptor present */
899	0, 0,
900	1,			/* default 32 vs 16 bit size */
901	1  			/* limit granularity (byte/page units)*/ },
902	/* Data Descriptor for user */
903{	0x0,			/* segment base address  */
904	0xfffff,		/* length - all address space */
905	SDT_MEMRWA,		/* segment type */
906	SEL_UPL,		/* segment descriptor priority level */
907	1,			/* segment descriptor present */
908	0, 0,
909	1,			/* default 32 vs 16 bit size */
910	1  			/* limit granularity (byte/page units)*/ } };
911
912setidt(idx, func, typ, dpl) char *func; {
913	struct gate_descriptor *ip = idt + idx;
914
915	ip->gd_looffset = (int)func;
916	ip->gd_selector = 8;
917	ip->gd_stkcpy = 0;
918	ip->gd_xx = 0;
919	ip->gd_type = typ;
920	ip->gd_dpl = dpl;
921	ip->gd_p = 1;
922	ip->gd_hioffset = ((int)func)>>16 ;
923}
924
925#define	IDTVEC(name)	__CONCAT(X, name)
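/* IDTVEC(foo) expands to Xfoo, the assembly entry stub for that vector. */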
926extern	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
927	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
928	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
929	IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
930	IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
931	IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
932	IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
933	IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(syscall);
934
935int lcr0(), lcr3(), rcr0(), rcr2();
936int _gsel_tss;
937
938init386(first)
939{
940	extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
941	int x, *pi;
942	unsigned biosbasemem, biosextmem;
943	struct gate_descriptor *gdp;
944	extern int sigcode,szsigcode;
945	/* table descriptors - used to load tables by the processor */
946	unsigned short	r_gdt[3], r_idt[3];
947	int	pagesinbase, pagesinext;
948
949
950	proc0.p_addr = proc0paddr;
951
952	/*
953	 * Initialize the console before we print anything out.
954	 */
955
956	cninit ();
957
958	/*
959	 * make gdt memory segments; the code segment goes up to the end of the
960	 * page with etext in it, the data segment goes to the end of
961	 * the address space
962	 */
963	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(i386_round_page(&etext)) - 1;
964	gdt_segs[GDATA_SEL].ssd_limit = 0xffffffff;	/* XXX constant? */
965	for (x=0; x < NGDT; x++) ssdtosd(gdt_segs+x, gdt+x);
966	/* make ldt memory segments */
967	/*
968	 * The data segment limit must not cover the user area because we
969	 * don't want the user area to be writable in copyout() etc. (page
970	 * level protection is lost in kernel mode on 386's).  Also, we
971	 * don't want the user area to be writable directly (page level
972	 * protection of the user area is not available on 486's with
973	 * CR0_WP set, because there is no user-read/kernel-write mode).
974	 *
975	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
976	 * should be spelled ...MAX_USER...
977	 */
978#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
979	/*
980	 * The code segment limit has to cover the user area until we move
981	 * the signal trampoline out of the user area.  This is safe because
982	 * the code segment cannot be written to directly.
983	 */
984#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * NBPG)
985	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
986	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
987	/* Note. eventually want private ldts per process */
988	for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);
989
990	/* exceptions */
991	setidt(0, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL);
992	setidt(1, &IDTVEC(dbg),  SDT_SYS386TGT, SEL_KPL);
993	setidt(2, &IDTVEC(nmi),  SDT_SYS386TGT, SEL_KPL);
994 	setidt(3, &IDTVEC(bpt),  SDT_SYS386TGT, SEL_UPL);
995	setidt(4, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_KPL);
996	setidt(5, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL);
997	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL);
998	setidt(7, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL);
999	setidt(8, &IDTVEC(dble),  SDT_SYS386TGT, SEL_KPL);
1000	setidt(9, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL);
1001	setidt(10, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL);
1002	setidt(11, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL);
1003	setidt(12, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL);
1004	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL);
1005	setidt(14, &IDTVEC(page),  SDT_SYS386TGT, SEL_KPL);
1006	setidt(15, &IDTVEC(rsvd),  SDT_SYS386TGT, SEL_KPL);
1007	setidt(16, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL);
1008	setidt(17, &IDTVEC(rsvd0),  SDT_SYS386TGT, SEL_KPL);
1009	setidt(18, &IDTVEC(rsvd1),  SDT_SYS386TGT, SEL_KPL);
1010	setidt(19, &IDTVEC(rsvd2),  SDT_SYS386TGT, SEL_KPL);
1011	setidt(20, &IDTVEC(rsvd3),  SDT_SYS386TGT, SEL_KPL);
1012	setidt(21, &IDTVEC(rsvd4),  SDT_SYS386TGT, SEL_KPL);
1013	setidt(22, &IDTVEC(rsvd5),  SDT_SYS386TGT, SEL_KPL);
1014	setidt(23, &IDTVEC(rsvd6),  SDT_SYS386TGT, SEL_KPL);
1015	setidt(24, &IDTVEC(rsvd7),  SDT_SYS386TGT, SEL_KPL);
1016	setidt(25, &IDTVEC(rsvd8),  SDT_SYS386TGT, SEL_KPL);
1017	setidt(26, &IDTVEC(rsvd9),  SDT_SYS386TGT, SEL_KPL);
1018	setidt(27, &IDTVEC(rsvd10),  SDT_SYS386TGT, SEL_KPL);
1019	setidt(28, &IDTVEC(rsvd11),  SDT_SYS386TGT, SEL_KPL);
1020	setidt(29, &IDTVEC(rsvd12),  SDT_SYS386TGT, SEL_KPL);
1021	setidt(30, &IDTVEC(rsvd13),  SDT_SYS386TGT, SEL_KPL);
1022	setidt(31, &IDTVEC(rsvd14),  SDT_SYS386TGT, SEL_KPL);
1023
1024#include	"isa.h"
1025#if	NISA >0
1026	isa_defaultirq();
1027#endif
1028
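	/*
	 * r_gdt and r_idt are the 6-byte operands that lgdt and lidt expect:
	 * a 16-bit limit followed by the 32-bit linear base address, built
	 * here out of three shorts.
	 */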
1029	r_gdt[0] = (unsigned short) (sizeof(gdt) - 1);
1030	r_gdt[1] = (unsigned short) ((int) gdt & 0xffff);
1031	r_gdt[2] = (unsigned short) ((int) gdt >> 16);
1032	lgdt(&r_gdt);
1033	r_idt[0] = (unsigned short) (sizeof(idt) - 1);
1034	r_idt[1] = (unsigned short) ((int) idt & 0xffff);
1035	r_idt[2] = (unsigned short) ((int) idt >> 16);
1036	lidt(&r_idt);
1037	lldt(GSEL(GLDT_SEL, SEL_KPL));
1038
1039#include "ddb.h"
1040#if NDDB > 0
1041	kdb_init();
1042	if (boothowto & RB_KDB)
1043		Debugger();
1044#endif
1045
1046	/* Use BIOS values stored in RTC CMOS RAM, since probing
1047	 * breaks certain 386 AT relics.
1048	 */
1049	biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
1050	biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
1051/*printf("bios base %d ext %d ", biosbasemem, biosextmem);*/
1052
1053	/*
1054	 * 15 Aug 92	Terry Lambert		The real fix for the CMOS bug
1055	 */
1056	if( biosbasemem != EXPECT_BASEMEM) {
1057		printf( "Warning: Base memory %dK, assuming %dK\n", biosbasemem, EXPECT_BASEMEM);
1058		biosbasemem = EXPECT_BASEMEM;		/* assume base*/
1059	}
1060
1061	if( biosextmem > 65536) {
1062		printf( "Warning: Extended memory %dK(>64M), assuming 0K\n", biosextmem);
1063		biosextmem = 0;				/* assume none*/
1064	}
1065
1066	/*
1067	 * Go into the normal calculation; note that we try to run in 640K, and
1068	 * that invalid CMOS values other than 0xffff are no longer a cause of
1069	 * ptdi problems.  I have found that a gutted kernel can run in 640K.
1070	 */
1071	pagesinbase = 640/4 - first/NBPG;
1072	pagesinext = biosextmem/4;
1073	/* Use the greater of either base or extended memory.  Do this
1074	 * until I reinstitute discontiguous allocation of the vm_page
1075	 * array.
1076	 */
1077	if (pagesinbase > pagesinext)
1078		Maxmem = 640/4;
1079	else {
1080		Maxmem = pagesinext + 0x100000/NBPG;
1081		if (first < 0x100000)
1082			first = 0x100000; /* skip hole */
1083	}
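	/*
	 * Illustrative figures (assuming NBPG == 4096): with 7168K of BIOS
	 * extended memory, pagesinext = 1792, so Maxmem = 1792 + 0x100000/NBPG
	 * = 2048 pages, i.e. usable memory tops out at 8MB.
	 */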
1084
1085	/* This used to explode, since Maxmem used to be 0 for bad CMOS */
1086	maxmem = Maxmem - 1;	/* highest page of usable memory */
1087	physmem = maxmem;	/* number of pages of physmem addr space */
1088/*printf("using first 0x%x to 0x%x\n ", first, maxmem*NBPG);*/
1089	if (maxmem < 2048/4) {
1090		printf("Too little RAM.  Warning: running in degraded mode.\n");
1091#ifdef INFORM_WAIT
1092		/*
1093		 * People with less than 2 Meg have to hit return; this way
1094		 * we see the messages and can tell them why they blow up later.
1095		 * If they get it working well enough to recompile, they can unset
1096		 * the flag; otherwise, it's a toy and they have to lump it.
1097		 */
1098		cngetc();
1099#endif	/* INFORM_WAIT */
1100	}
1101
1102	/* call pmap initialization to make new kernel address space */
1103#ifndef MACHINE_NONCONTIG
1104	pmap_bootstrap (first, 0);
1105#else
1106	pmap_bootstrap ((vm_offset_t)atdevbase + IOM_SIZE);
1107
1108#endif /* MACHINE_NONCONTIG */
1109	/* now running on new page tables, configured, and u/iom is accessible */
1110
1111	/* make an initial TSS so the processor can get the interrupt stack on syscall! */
1112	proc0.p_addr->u_pcb.pcb_tss.tss_esp0 = (int) kstack + UPAGES*NBPG;
1113	proc0.p_addr->u_pcb.pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
1114	_gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1115
1116	((struct i386tss *)gdt_segs[GPROC0_SEL].ssd_base)->tss_ioopt =
1117		(sizeof(tss))<<16;
1118
1119	ltr(_gsel_tss);
1120
1121	/* make a call gate to reenter kernel with */
1122	gdp = (struct gate_descriptor *) &ldt[LSYS5CALLS_SEL][0];
1123
1124	x = (int) &IDTVEC(syscall);
1125	gdp->gd_looffset = x++;
1126	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
1127	gdp->gd_stkcpy = 0;
1128	gdp->gd_type = SDT_SYS386CGT;
1129	gdp->gd_dpl = SEL_UPL;
1130	gdp->gd_p = 1;
1131	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;
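	/*
	 * User processes enter the kernel with an lcall through this LDT
	 * slot (LSYS5CALLS_SEL): the gate's DPL of SEL_UPL lets user code
	 * use it, while the target selector and offset drop execution into
	 * Xsyscall in the kernel code segment.
	 */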
1132
1133	/* transfer to user mode */
1134
1135	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
1136	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);
1137
1138	/* setup proc 0's pcb */
1139	bcopy(&sigcode, proc0.p_addr->u_pcb.pcb_sigc, szsigcode);
1140	proc0.p_addr->u_pcb.pcb_flags = 0;
1141	proc0.p_addr->u_pcb.pcb_ptd = IdlePTD;
1142}
1143
1144extern struct pte	*CMAP1, *CMAP2;
1145extern caddr_t		CADDR1, CADDR2;
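/*
 * CMAP1 and CMAP2 are kernel PTEs reserved to map an arbitrary physical
 * page at the fixed virtual addresses CADDR1 and CADDR2.  The routines
 * below point a CMAP entry at the target page, reload %cr3 to flush the
 * TLB, and then operate on that page through the matching CADDR window.
 */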
1146/*
1147 * zero out physical memory
1148 * specified in relocation units (NBPG bytes)
1149 */
1150clearseg(n) {
1151
1152	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
1153	load_cr3(rcr3());
1154	bzero(CADDR2,NBPG);
1155#ifndef MACHINE_NONCONTIG
1156	*(int *) CADDR2 = 0;
1157#endif /* MACHINE_NONCONTIG */
1158}
1159
1160/*
1161 * copy a page of physical memory
1162 * specified in relocation units (NBPG bytes)
1163 */
1164void
1165copyseg(frm, n) {
1166
1167	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
1168	load_cr3(rcr3());
1169	bcopy((void *)frm, (void *)CADDR2, NBPG);
1170}
1171
1172/*
1173 * copy one page of physical memory to another,
1174 * both specified in relocation units (NBPG bytes)
1175 */
1176void
1177physcopyseg(frm, to) {
1178
1179	*(int *)CMAP1 = PG_V | PG_KW | ctob(frm);
1180	*(int *)CMAP2 = PG_V | PG_KW | ctob(to);
1181	load_cr3(rcr3());
1182	bcopy(CADDR1, CADDR2, NBPG);
1183}
1184
1185/*aston() {
1186	schednetisr(NETISR_AST);
1187}*/
1188
1189void
1190setsoftclock() {
1191	schednetisr(NETISR_SCLK);
1192}
1193
1194/*
1195 * insert an element into a queue
1196 */
1197#undef insque
1198_insque(element, head)
1199	register struct prochd *element, *head;
1200{
1201	element->ph_link = head->ph_link;
1202	head->ph_link = (struct proc *)element;
1203	element->ph_rlink = (struct proc *)head;
1204	((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
1205}
1206
1207/*
1208 * remove an element from a queue
1209 */
1210#undef remque
1211_remque(element)
1212	register struct prochd *element;
1213{
1214	((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
1215	((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
1216	element->ph_rlink = (struct proc *)0;
1217}
1218
1219#ifdef SLOW_OLD_COPYSTRS
1220vmunaccess() {}
1221
1222#if 0		/* assembler versions now in locore.s */
1223/*
1224 * Below written in C to allow access to debugging code
1225 */
1226copyinstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
1227	void *toaddr, *fromaddr; {
1228	int c,tally;
1229
1230	tally = 0;
1231	while (maxlength--) {
1232		c = fubyte(fromaddr++);
1233		if (c == -1) {
1234			if(lencopied) *lencopied = tally;
1235			return(EFAULT);
1236		}
1237		tally++;
1238		*(char *)toaddr++ = (char) c;
1239		if (c == 0){
1240			if(lencopied) *lencopied = (u_int)tally;
1241			return(0);
1242		}
1243	}
1244	if(lencopied) *lencopied = (u_int)tally;
1245	return(ENAMETOOLONG);
1246}
1247
1248copyoutstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
1249	void *fromaddr, *toaddr; {
1250	int c;
1251	int tally;
1252
1253	tally = 0;
1254	while (maxlength--) {
1255		c = subyte(toaddr++, *(char *)fromaddr);
1256		if (c == -1) return(EFAULT);
1257		tally++;
1258		if (*(char *)fromaddr++ == 0){
1259			if(lencopied) *lencopied = tally;
1260			return(0);
1261		}
1262	}
1263	if(lencopied) *lencopied = tally;
1264	return(ENAMETOOLONG);
1265}
1266
1267#endif /* 0 - assembler versions now in locore.s */
1268
1269copystr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
1270	void *fromaddr, *toaddr; {
1271	u_int tally;
1272
1273	tally = 0;
1274	while (maxlength--) {
1275		*(u_char *)toaddr = *(u_char *)fromaddr++;
1276		tally++;
1277		if (*(u_char *)toaddr++ == 0) {
1278			if(lencopied) *lencopied = tally;
1279			return(0);
1280		}
1281	}
1282	if(lencopied) *lencopied = tally;
1283	return(ENAMETOOLONG);
1284}
1285#endif /* SLOW_OLD_COPYSTRS */
1286