1/*	$OpenBSD: machdep.c,v 1.31 2001/03/29 00:47:53 mickey Exp $	*/
2
3/*
4 * Copyright (c) 1999-2000 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *      This product includes software developed by Michael Shalayeff.
18 * 4. The name of the author may not be used to endorse or promote products
19 *    derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
30 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/signalvar.h>
37#include <sys/kernel.h>
38#include <sys/map.h>
39#include <sys/proc.h>
40#include <sys/buf.h>
41#include <sys/reboot.h>
42#include <sys/device.h>
43#include <sys/conf.h>
44#include <sys/file.h>
45#include <sys/timeout.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
48#include <sys/msgbuf.h>
49#include <sys/ioctl.h>
50#include <sys/tty.h>
51#include <sys/user.h>
52#include <sys/exec.h>
53#include <sys/sysctl.h>
54#include <sys/core.h>
55#include <sys/kcore.h>
56#include <sys/extent.h>
57#ifdef SYSVMSG
58#include <sys/msg.h>
59#endif
60#ifdef SYSVSEM
61#include <sys/sem.h>
62#endif
63#ifdef SYSVSHM
64#include <sys/shm.h>
65#endif
66
67#include <sys/mount.h>
68#include <sys/syscallargs.h>
69
70#include <vm/vm.h>
71#include <vm/vm_kern.h>
72#include <uvm/uvm_page.h>
73#include <uvm/uvm.h>
74
75#include <dev/cons.h>
76
77#include <machine/pdc.h>
78#include <machine/iomod.h>
79#include <machine/psl.h>
80#include <machine/reg.h>
81#include <machine/cpufunc.h>
82#include <machine/autoconf.h>
83#include <machine/kcore.h>
84
85#ifdef COMPAT_HPUX
86#include <compat/hpux/hpux.h>
87#endif
88
89#ifdef DDB
90#include <machine/db_machdep.h>
91#include <ddb/db_access.h>
92#include <ddb/db_sym.h>
93#include <ddb/db_extern.h>
94#endif
95
96#include <hppa/dev/cpudevs.h>
97
98/*
99 * Patchable buffer cache parameters
100 */
101int nswbuf = 0;
102#ifdef NBUF
103int nbuf = NBUF;
104#else
105int nbuf = 0;
106#endif
107#ifdef BUFPAGES
108int bufpages = BUFPAGES;
109#else
110int bufpages = 0;
111#endif
112
113/*
114 * Different kinds of flags used throughout the kernel.
115 */
116int cold = 1;		/* cleared once the system is up and running */
117int msgbufmapped;	/* set when safe to use msgbuf */
118
119/*
120 * cache configuration; the numbers are the same for most machines,
121 * so it makes sense to pick defaults depending on the cpu types
122 * configured into the kernel
123 */
124int icache_stride, icache_line_mask;
125int dcache_stride, dcache_line_mask;
126
127/*
128 * things not to kill (LED state)
129 */
130volatile u_int8_t *machine_ledaddr;
131int machine_ledword, machine_leds;
132
133/*
134 * CPU params (should be the same for all cpus in the system)
135 */
136struct pdc_cache pdc_cache PDC_ALIGNMENT;
137struct pdc_btlb pdc_btlb PDC_ALIGNMENT;
138
139	/* should be the same, within a small deviation, for all installed cpus */
140u_int	cpu_ticksnum, cpu_ticksdenom, cpu_hzticks;
141
142	/* exported info */
143char	machine[] = MACHINE_ARCH;
144char	cpu_model[128];
145enum hppa_cpu_type cpu_type;
146const char *cpu_typename;
147#ifdef COMPAT_HPUX
148int	cpu_model_hpux;	/* contains HPUX_SYSCONF_CPU* kind of value */
149#endif
150
151/*
152 * exported methods for cpus
153 */
154int (*cpu_desidhash) __P((void));
155int (*cpu_hpt_init) __P((vaddr_t hpt, vsize_t hptsize));
156int (*cpu_ibtlb_ins) __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
157	    vsize_t sz, u_int prot));
158int (*cpu_dbtlb_ins) __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
159	    vsize_t sz, u_int prot));
160
161dev_t	bootdev;
162int	totalphysmem, resvmem, physmem, esym;
163
164/*
165 * Things for MI glue to stick on.
166 */
167struct user *proc0paddr;
168long mem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
169struct extent *hppa_ex;
170
171vm_map_t exec_map = NULL;
172vm_map_t mb_map = NULL;
173vm_map_t phys_map = NULL;
174
175
176void delay_init __P((void));
177static __inline void fall __P((int, int, int, int, int));
178void dumpsys __P((void));
179void hpmc_dump __P((void));
180
181/*
182 * widely used hardware params
183 */
184struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
185struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
186struct pdc_coherence pdc_coherence PDC_ALIGNMENT;
187struct pdc_spidb pdc_spidbits PDC_ALIGNMENT;
188
189#ifdef DEBUG
190int sigdebug = 0xff;
191pid_t sigpid = 0;
192#define SDB_FOLLOW	0x01
193#endif
194
195/*
196 * Whatever CPU types we support
197 */
198extern const u_int itlb_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
199extern const u_int itlb_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
200extern const u_int itlb_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
201extern const u_int itlb_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
202int iibtlb_s __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
203    vsize_t sz, u_int prot));
204int idbtlb_s __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
205    vsize_t sz, u_int prot));
206int ibtlb_t __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
207    vsize_t sz, u_int prot));
208int ibtlb_l __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
209    vsize_t sz, u_int prot));
210int ibtlb_g __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
211    vsize_t sz, u_int prot));
212int pbtlb_g __P((int i));
213int hpti_l __P((vaddr_t, vsize_t));
214int hpti_g __P((vaddr_t, vsize_t));
215int desidhash_x __P((void));
216int desidhash_s __P((void));
217int desidhash_t __P((void));
218int desidhash_l __P((void));
219int desidhash_g __P((void));
220const struct hppa_cpu_typed {
221	char name[8];
222	enum hppa_cpu_type type;
223	int  arch;
224	int  features;
225	int (*desidhash) __P((void));
226	const u_int *itlbh, *dtlbh, *dtlbnah, *tlbdh;
227	int (*dbtlbins) __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
228	    vsize_t sz, u_int prot));
229	int (*ibtlbins) __P((int i, pa_space_t sp, vaddr_t va, paddr_t pa,
230	    vsize_t sz, u_int prot));
231	int (*btlbprg) __P((int i));
232	int (*hptinit) __P((vaddr_t hpt, vsize_t hptsize));
233} cpu_types[] = {
234#ifdef HP7000_CPU
235	{ "PCX",   hpcx,  0x10, 0,
236	  desidhash_x, itlb_x, dtlb_x, dtlbna_x, tlbd_x,
237	  ibtlb_g, NULL, pbtlb_g},
238#endif
239#ifdef HP7100_CPU
240	{ "PCXS",  hpcxs, 0x11, HPPA_FTRS_BTLBS,
241	  desidhash_s, itlb_s, dtlb_s, dtlbna_s, tlbd_s,
242	  ibtlb_g, NULL, pbtlb_g},
243#endif
244#ifdef HP7200_CPU
245	{ "PCXT",  hpcxt, 0x11, HPPA_FTRS_BTLBU,
246	  desidhash_t, itlb_t, dtlb_t, dtlbna_t, tlbd_t,
247	  ibtlb_g, NULL, pbtlb_g},
248/* HOW?	{ "PCXT'", hpcxta,0x11, HPPA_FTRS_BTLBU,
249	  desidhash_t, itlb_t, dtlb_t, dtlbna_t, tlbd_t,
250	  ibtlb_g, NULL, pbtlb_g}, */
251#endif
252#ifdef HP7100LC_CPU
253	{ "PCXL",  hpcxl, 0x11, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
254	  desidhash_l, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
255	  ibtlb_g, NULL, pbtlb_g, hpti_g},
256#endif
257#ifdef HP7300LC_CPU
258/* HOW?	{ "PCXL2", hpcxl2,0x11, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
259	  desidhash_l, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
260	  ibtlb_g, NULL, pbtlb_g, hpti_g}, */
261#endif
262#ifdef HP8000_CPU
263	{ "PCXU",  hpcxu, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
264	  desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
265	  ibtlb_g, NULL, pbtlb_g, hpti_g},
266#endif
267#ifdef HP8200_CPU
268/* HOW?	{ "PCXU2", hpcxu2,0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
269	  desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
270	  ibtlb_g, NULL, pbtlb_g, hpti_g}, */
271#endif
272#ifdef HP8500_CPU
273/* HOW?	{ "PCXW",  hpcxw, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
274	  desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
275	  ibtlb_g, NULL, pbtlb_g, hpti_g}, */
276#endif
277#ifdef HP8600_CPU
278/* HOW?	{ "PCXW+", hpcxw, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
279	  desidhash_g, itlb_l, dtlb_l, dtlbna_l, tlbd_l,
280	  ibtlb_g, NULL, pbtlb_g, hpti_g}, */
281#endif
282	{ "", 0 }
283};
284
285void
286hppa_init(start)
287	paddr_t start;
288{
289	extern int kernel_text;
290	vaddr_t v, vstart, vend;
291	register int error;
292	int hptsize;	/* size of HPT table if supported */
293	int cpu_features = 0;
294
295	boothowto |= RB_SINGLE;	/* XXX always go single-user while debugging */
296
297	pdc_init();	/* init the PDC interface, so we can call it easily */
298
299	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
300	delay_init();	/* calculate cpu clock ratio */
301
302	/* cache parameters */
303	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
304	    &pdc_cache)) < 0) {
305#ifdef DEBUG
306		printf("WARNING: PDC_CACHE error %d\n", error);
307#endif
308	}
309
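	/* PDC encodes cache line sizes in units of 16 bytes */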
310	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
311	dcache_stride = pdc_cache.dc_stride;
312	icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1;
313	icache_stride = pdc_cache.ic_stride;
314
315	/* cache coherence params (probably available on PA-8x00 only) */
316	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_SETCS,
317	    &pdc_coherence, 1, 1, 1, 1);
318#ifdef DEBUG
319	printf("PDC_CACHE_SETCS: %d, %d, %d, %d (%d)\n",
320	    pdc_coherence.ia_cst, pdc_coherence.da_cst,
321	    pdc_coherence.ita_cst, pdc_coherence.dta_cst, error);
322#endif
323	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_GETSPIDB,
324	    &pdc_spidbits, 0, 0, 0, 0);
325	printf("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error);
326
327	/* setup hpmc handler */
328	{
329		extern u_int hpmc_v;	/* from locore.s */
330		register u_int *p = &hpmc_v;
331
332		if (pdc_call((iodcio_t)pdc, 0, PDC_INSTR, PDC_INSTR_DFLT, p))
333			*p = 0;	/* XXX nop is more appropriate? */
334
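		/*
		 * p[5] is the checksum slot: set it so that the eight
		 * words of the HPMC handler vector sum to zero.
		 */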
335		p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);
336	}
337
338	/* BTLB params */
339	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
340	    PDC_BTLB_DEFAULT, &pdc_btlb)) < 0) {
341#ifdef DEBUG
342		printf("WARNING: PDC_BTLB error %d", error);
343#endif
344	} else {
345#ifdef BTLBDEBUG
346		printf("btlb info: minsz=%d, maxsz=%d\n",
347		    pdc_btlb.min_size, pdc_btlb.max_size);
348		printf("btlb fixed: i=%d, d=%d, c=%d\n",
349		    pdc_btlb.finfo.num_i,
350		    pdc_btlb.finfo.num_d,
351		    pdc_btlb.finfo.num_c);
352		printf("btlb varbl: i=%d, d=%d, c=%d\n",
353		    pdc_btlb.vinfo.num_i,
354		    pdc_btlb.vinfo.num_d,
355		    pdc_btlb.vinfo.num_c);
356#endif /* BTLBDEBUG */
357		/* purge TLBs and caches */
358		if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
359		    PDC_BTLB_PURGE_ALL) < 0)
360			printf("WARNING: BTLB purge failed\n");
361
362		cpu_features = pdc_btlb.finfo.num_c?
363		    HPPA_FTRS_BTLBU : HPPA_FTRS_BTLBS;
364	}
365
366	ptlball();
367	fcacheall();
368
369	totalphysmem = PAGE0->imm_max_mem / NBPG;
370	resvmem = ((vaddr_t)&kernel_text) / NBPG;
371
372	/* calculate HPT size */
373	/* for (hptsize = 256; hptsize < totalphysmem; hptsize *= 2); */
374	hptsize = 256;	/* XXX one page for now */
375	hptsize *= 16;	/* sizeof(hpt_entry) */
376
377	if (pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
378	    !pdc_hwtlb.min_size && !pdc_hwtlb.max_size) {
379		printf("WARNING: no HPT support, fine!\n");
380		mtctl(hptsize - 1, CR_HPTMASK);
381		hptsize = 0;
382	} else {
383		cpu_features |= HPPA_FTRS_HVT;
384
385		if (hptsize > pdc_hwtlb.max_size)
386			hptsize = pdc_hwtlb.max_size;
387		else if (hptsize < pdc_hwtlb.min_size)
388			hptsize = pdc_hwtlb.min_size;
389		mtctl(hptsize - 1, CR_HPTMASK);
390	}
391
392	/*
393	 * Deal w/ CPU now
394	 */
395	{
396		const struct hppa_cpu_typed *p;
397
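		/* find the table entry whose feature set matches what we probed */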
398		for (p = cpu_types;
399		     p->arch && p->features != cpu_features; p++);
400
401		if (!p->arch)
402			printf("WARNING: UNKNOWN CPU TYPE; GOOD LUCK (%x)\n",
403			    cpu_features);
404		else {
405			/*
406			 * TLB handler entry points in locore.S, to be
407			 * patched below with the handlers matching the
408			 * detected cpu type.
409			 */
410			extern u_int trap_ep_T_TLB_DIRTY[];
411			extern u_int trap_ep_T_DTLBMISS[];
412			extern u_int trap_ep_T_DTLBMISSNA[];
413			extern u_int trap_ep_T_ITLBMISS[];
414			extern u_int trap_ep_T_ITLBMISSNA[];
415
416			cpu_type      = p->type;
417			cpu_typename  = p->name;
418			cpu_ibtlb_ins = p->ibtlbins;
419			cpu_dbtlb_ins = p->dbtlbins;
420			cpu_hpt_init  = p->hptinit;
421			cpu_desidhash = p->desidhash;
422
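			/*
			 * Copy the two instruction words of each per-CPU
			 * TLB handler stub over the corresponding trap
			 * entry point in locore.S.
			 */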
423#define	LDILDO(t,f) ((t)[0] = (f)[0], (t)[1] = (f)[1])
424			LDILDO(trap_ep_T_TLB_DIRTY , p->tlbdh);
425			LDILDO(trap_ep_T_DTLBMISS  , p->dtlbh);
426			LDILDO(trap_ep_T_DTLBMISSNA, p->dtlbnah);
427			LDILDO(trap_ep_T_ITLBMISS  , p->itlbh);
428			LDILDO(trap_ep_T_ITLBMISSNA, p->itlbh);
429#undef LDILDO
430		}
431	}
432
433	/* we hope this won't fail */
434	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
435	    (caddr_t)mem_ex_storage, sizeof(mem_ex_storage),
436	    EX_NOCOALESCE|EX_NOWAIT);
437	if (extent_alloc_region(hppa_ex, 0, (vaddr_t)PAGE0->imm_max_mem,
438	    EX_NOWAIT))
439		panic("cannot reserve main memory");
440
441	vstart = hppa_round_page(start);
442	vend = VM_MAX_KERNEL_ADDRESS;
443
444	/*
445	 * Now allocate kernel dynamic variables
446	 */
447
448	/* buffer cache parameters */
449#ifndef BUFCACHEPERCENT
450#define BUFCACHEPERCENT 10
451#endif /* BUFCACHEPERCENT */
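	/* default to 5% of memory on small (<= 0x1000 pages) machines */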
452	if (bufpages == 0)
453		bufpages = totalphysmem / 100 *
454		    (totalphysmem <= 0x1000? 5 : BUFCACHEPERCENT);
455
456	if (nbuf == 0)
457		nbuf = bufpages < 16? 16 : bufpages;
458
459	/* Restrict to at most 70% filled kvm */
460	if (nbuf * MAXBSIZE >
461	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10)
462		nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
463		    MAXBSIZE * 7 / 10;
464
465	/* More buffer pages than fit into the buffers would be senseless. */
466	if (bufpages > nbuf * MAXBSIZE / CLBYTES)
467		bufpages = nbuf * MAXBSIZE / CLBYTES;
468
469	if (nswbuf == 0) {
470		nswbuf = (nbuf / 2) &~ 1;
471		if (nswbuf > 256)
472			nswbuf = 256;
473	}
474
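	/* carve the fixed kernel tables out of the virtual space at v */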
475	v = vstart;
476#define valloc(name, type, num) (name) = (type *)v; v = (vaddr_t)((name)+(num))
477
478#ifdef REAL_CLISTS
479	valloc(cfree, struct cblock, nclist);
480#endif
481
482	valloc(timeouts, struct timeout, ntimeout);
483	valloc(buf, struct buf, nbuf);
484
485#ifdef SYSVSHM
486	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
487#endif
488#ifdef SYSVSEM
489	valloc(sema, struct semid_ds, seminfo.semmni);
490	valloc(sem, struct sem, seminfo.semmns);
491	/* This is pretty disgusting! */
492	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
493#endif
494#ifdef SYSVMSG
495	valloc(msgpool, char, msginfo.msgmax);
496	valloc(msgmaps, struct msgmap, msginfo.msgseg);
497	valloc(msghdrs, struct msg, msginfo.msgtql);
498	valloc(msqids, struct msqid_ds, msginfo.msgmni);
499#endif
500#undef valloc
501
502	v = hppa_round_page(v);
503	bzero ((void *)vstart, (v - vstart));
504	vstart = v;
505
506	pmap_bootstrap(&vstart, &vend);
507	physmem = totalphysmem - btoc(vstart);
508
509	/* alloc msgbuf */
510	if (!(msgbufp = (void *)pmap_steal_memory(MSGBUFSIZE, NULL, NULL)))
511		panic("cannot allocate msgbuf");
512	msgbufmapped = 1;
513
514	/* Turn on the HW TLB assist */
515	if (hptsize) {
516		u_int hpt;
517
518		mfctl(CR_VTOP, hpt);
519		if ((error = (cpu_hpt_init)(hpt, hptsize)) < 0) {
520#ifdef DEBUG
521			printf("WARNING: HPT init error %d\n", error);
522#endif
523		} else {
524#ifdef PMAPDEBUG
525			printf("HPT: %d entries @ 0x%x\n",
526			    hptsize / sizeof(struct hpt_entry), hpt);
527#endif
528		}
529	}
530
531	/* locate coprocessors and SFUs */
532	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
533	    &pdc_coproc)) < 0)
534		printf("WARNING: PDC_COPROC error %d\n", error);
535	else {
536#ifdef DEBUG
537		printf("pdc_coproc: %x, %x\n", pdc_coproc.ccr_enable,
538		    pdc_coproc.ccr_present);
539#endif
540		mtctl(pdc_coproc.ccr_enable & CCR_MASK, CR_CCR);
541	}
542
543	/* they say PDC_COPROC might turn the fault light on */
544	pdc_call((iodcio_t)pdc, 0, PDC_CHASSIS, PDC_CHASSIS_DISP,
545	    PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0);
546
547#ifdef DDB
548	ddb_init();
549#endif
550}
551
552void
553cpu_startup()
554{
555	struct pdc_model pdc_model PDC_ALIGNMENT;
556	vaddr_t minaddr, maxaddr;
557	vsize_t size;
558	int base, residual;
559	int err, i;
560#ifdef DEBUG
561	extern int pmapdebug;
562	int opmapdebug = pmapdebug;
563
564	pmapdebug = 0;
565#endif
566
567	/*
568	 * i won't understand a friend of mine,
569	 * who sat in a room full of artificial ice,
570	 * fogging the air w/ humid cries --
571	 *	WELCOME TO SUMMER!
572	 */
573	printf(version);
574
575	/* identify system type */
576	if ((err = pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_INFO,
577	    &pdc_model)) < 0) {
578#ifdef DEBUG
579		printf("WARNING: PDC_MODEL error %d\n", err);
580#endif
581	} else {
582		const char *p, *q;
583		i = pdc_model.hvers >> 4;
584		p = hppa_mod_info(HPPA_TYPE_BOARD, i);
585		switch (pdc_model.arch_rev) {
586		default:
587		case 0:
588			q = "1.0";
589#ifdef COMPAT_HPUX
590			cpu_model_hpux = HPUX_SYSCONF_CPUPA10;
591#endif
592			break;
593		case 4:
594			q = "1.1";
595#ifdef COMPAT_HPUX
596			cpu_model_hpux = HPUX_SYSCONF_CPUPA11;
597#endif
598			break;
599		case 8:
600			q = "2.0";
601#ifdef COMPAT_HPUX
602			cpu_model_hpux = HPUX_SYSCONF_CPUPA20;
603#endif
604			break;
605		}
606
607		if (p)
608			sprintf(cpu_model, "HP9000/%s PA-RISC %s", p, q);
609		else
610			sprintf(cpu_model, "HP9000/(UNKNOWN %x) PA-RISC %s",
611				i, q);
612		printf("%s\n", cpu_model);
613	}
614
615	printf("real mem = %d (%d reserved for PROM, %d used by OpenBSD)\n",
616	    ctob(totalphysmem), ctob(resvmem), ctob(physmem));
617
618	/*
619	 * Now allocate buffers proper.  They are different than the above
620	 * in that they usually occupy more virtual memory than physical.
621	 */
622	size = MAXBSIZE * nbuf;
623	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
624	    NULL, UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
625	    UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
626		panic("cpu_startup: cannot allocate VM for buffers");
627	minaddr = (vaddr_t)buffers;
628	base = bufpages / nbuf;
629	residual = bufpages % nbuf;
630	for (i = 0; i < nbuf; i++) {
631		vsize_t curbufsize;
632		vaddr_t curbuf;
633		struct vm_page *pg;
634
635		/*
636		 * First <residual> buffers get (base+1) physical pages
637		 * allocated for them.  The rest get (base) physical pages.
638		 *
639		 * The rest of each buffer occupies virtual space,
640		 * but has no physical memory allocated for it.
641		 */
642		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
643		curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
644
645		while (curbufsize) {
646			if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
647				panic("cpu_startup: not enough memory for "
648				    "buffer cache");
649			pmap_enter(kernel_map->pmap, curbuf,
650			    VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE,
651			    TRUE, VM_PROT_READ|VM_PROT_WRITE);
652			curbuf += PAGE_SIZE;
653			curbufsize -= PAGE_SIZE;
654		}
655	}
656
657	/*
658	 * Allocate a submap for exec arguments.  This map effectively
659	 * limits the number of processes exec'ing at any time.
660	 */
661	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
662	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
663
664	/*
665	 * Allocate a submap for physio
666	 */
667	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
668	    VM_PHYS_SIZE, 0, FALSE, NULL);
669
670	/*
671	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
672	 * we use the more space efficient malloc in place of kmem_alloc.
673	 */
674	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
675	    M_MBUF, M_NOWAIT);
676	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
677	mb_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&mbutl, &maxaddr,
678	    VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
679
680	/*
681	 * Initialize timeouts
682	 */
683	timeout_init();
684
685#ifdef DEBUG
686	pmapdebug = opmapdebug;
687#endif
688	printf("avail mem = %ld\n", ptoa(uvmexp.free));
689	printf("using %d buffers containing %d bytes of memory\n",
690	    nbuf, bufpages * CLBYTES);
691
692	/*
693	 * Set up buffers, so they can be used to read disk labels.
694	 */
695	bufinit();
696
697	/*
698	 * Configure the system.
699	 */
700	if (boothowto & RB_CONFIG) {
701#ifdef BOOT_CONFIG
702		user_config();
703#else
704		printf("kernel does not support -c; continuing..\n");
705#endif
706	}
707	configure();
708}
709
710/*
711 * compute the cpu clock ratio such that:
712 *	cpu_ticksnum / cpu_ticksdenom = t + delta
713 *	delta -> 0
714 */
715void
716delay_init(void)
717{
718	register u_int num, denom, delta, mdelta;
719
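	/*
	 * PAGE0->mem_10msec is the number of interval timer ticks in 10ms;
	 * search denominators below 1000 for the closest rational
	 * approximation of the ticks-per-microsecond ratio.
	 */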
720	mdelta = UINT_MAX;
721	for (denom = 1; denom < 1000; denom++) {
722		num = (PAGE0->mem_10msec * denom) / 10000;
723		delta = num * 10000 / denom - PAGE0->mem_10msec;
724		if (!delta) {
725			cpu_ticksdenom = denom;
726			cpu_ticksnum = num;
727			break;
728		} else if (delta < mdelta) {
729			cpu_ticksdenom = denom;
730			cpu_ticksnum = num;
731		}
732	}
733}
734
735void
736delay(us)
737	u_int us;
738{
739	register u_int start, end, n;
740
741	mfctl(CR_ITMR, start);
742	while (us) {
743		n = min(1000, us);
744		end = start + n * cpu_ticksnum / cpu_ticksdenom;
745
746		/* N.B. Interval Timer may wrap around */
747		if (end < start)
748			do
749				mfctl(CR_ITMR, start);
750			while (start > end);
751
752		do
753			mfctl(CR_ITMR, start);
754		while (start < end);
755
756		us -= n;
757		mfctl(CR_ITMR, start);
758	}
759}
760
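/*
 * Walk a cache space as described by PDC (base/count/loop/stride),
 * flushing every entry: the data cache if `data' is set, the
 * instruction cache otherwise.
 */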
761static __inline void
762fall(c_base, c_count, c_loop, c_stride, data)
763	int c_base, c_count, c_loop, c_stride, data;
764{
765	register int loop;
766
767	for (; c_count--; c_base += c_stride)
768		for (loop = c_loop; loop--; )
769			if (data)
770				fdce(0, c_base);
771			else
772				fice(0, c_base);
773}
774
775void
776fcacheall()
777{
778	/*
779	 * Flush the instruction, then data cache.
780	 */
781	fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop,
782	    pdc_cache.ic_stride, 0);
783	sync_caches();
784	fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop,
785	    pdc_cache.dc_stride, 1);
786	sync_caches();
787}
788
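/*
 * Purge the instruction and data TLBs entirely, walking the
 * space/offset ranges reported by PDC_CACHE.
 */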
789void
790ptlball()
791{
792	register pa_space_t sp;
793	register int i, j, k;
794
795	/* instruction TLB */
796	sp = pdc_cache.it_sp_base;
797	for (i = 0; i < pdc_cache.it_sp_count; i++) {
798		register vaddr_t off = pdc_cache.it_off_base;
799		for (j = 0; j < pdc_cache.it_off_count; j++) {
800			for (k = 0; k < pdc_cache.it_loop; k++)
801				pitlbe(sp, off);
802			off += pdc_cache.it_off_stride;
803		}
804		sp += pdc_cache.it_sp_stride;
805	}
806
807	/* data TLB */
808	sp = pdc_cache.dt_sp_base;
809	for (i = 0; i < pdc_cache.dt_sp_count; i++) {
810		register vaddr_t off = pdc_cache.dt_off_base;
811		for (j = 0; j < pdc_cache.dt_off_count; j++) {
812			for (k = 0; k < pdc_cache.dt_loop; k++)
813				pdtlbe(sp, off);
814			off += pdc_cache.dt_off_stride;
815		}
816		sp += pdc_cache.dt_sp_stride;
817	}
818}
819
820int
821desidhash_g()
822{
823	/* TODO call PDC to disable SID hashing in the cache index */
824
825	return 0;
826}
827
828int
829hpti_g(hpt, hptsize)
830	vaddr_t hpt;
831	vsize_t hptsize;
832{
833	return pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_CONFIG,
834	    &pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE);
835}
836
837int
838pbtlb_g(i)
839	int i;
840{
841	return -1;
842}
843
844int
845ibtlb_g(i, sp, va, pa, sz, prot)
846	int i;
847	pa_space_t sp;
848	vaddr_t va;
849	paddr_t pa;
850	vsize_t sz;
851	u_int prot;
852{
853	int error;
854
855	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_INSERT,
856	    sp, va, pa, sz, prot, i)) < 0) {
857#ifdef BTLBDEBUG
858		printf("WARNING: BTLB insert failed (%d)\n", error);
859#endif
860	}
861	return error;
862}
863
864int
865btlb_insert(space, va, pa, lenp, prot)
866	pa_space_t space;
867	vaddr_t va;
868	paddr_t pa;
869	vsize_t *lenp;
870	u_int prot;
871{
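	/* bitmap of BTLB slots already handed out */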
872	static u_int32_t mask;
873	register vsize_t len;
874	register int error, i;
875
876	/* round the size up to the next supported (power of two) BTLB size */
877	for (len = pdc_btlb.min_size << PGSHIFT; len < *lenp; len <<= 1);
878	len >>= PGSHIFT;
879	i = ffs(~mask) - 1;
880	if (len > pdc_btlb.max_size || i < 0) {
881#ifdef BTLBDEBUG
882		printf("btlb_insert: too big (%u < %u < %u)\n",
883		    pdc_btlb.min_size, len, pdc_btlb.max_size);
884#endif
885		return -(ENOMEM);
886	}
887
888	mask |= 1 << i;
889	pa >>= PGSHIFT;
890	va >>= PGSHIFT;
891	/* check address alignment */
892	if (pa & (len - 1))
893		printf("WARNING: BTLB address misaligned\n");
894
895	/* ensure IO space is uncached */
896	if ((pa & 0xF0000) == 0xF0000)
897		prot |= TLB_UNCACHEABLE;
898
899#ifdef BTLBDEBUG
900	printf("btlb_insert(%d): %x:%x=%x[%x,%x]\n", i, space, va, pa, len, prot);
901#endif
902	if ((error = (*cpu_dbtlb_ins)(i, space, va, pa, len, prot)) < 0)
903		return -(EINVAL);
904	*lenp = len << PGSHIFT;
905
906	return i;
907}
908
909int waittime = -1;
910
911void
912boot(howto)
913	int howto;
914{
915	if (cold)
916		/* XXX howto |= RB_HALT */;
917	else {
918		boothowto = howto | (boothowto & RB_HALT);
919
920		if (!(howto & RB_NOSYNC) && waittime < 0) {
921			extern struct proc proc0;
922
923			/* protect against curproc->p_stats refs in sync XXX */
924			if (curproc == NULL)
925				curproc = &proc0;
926
927			waittime = 0;
928			vfs_shutdown();
929			if ((howto & RB_TIMEBAD) == 0)
930				resettodr();
931			else
932				printf("WARNING: not updating battery clock\n");
933		}
934	}
935
936	/* XXX probably save howto into stable storage */
937
938	splhigh();
939
940	if ((howto & (RB_DUMP /* | RB_HALT */)) == RB_DUMP)
941		dumpsys();
942
943	doshutdownhooks();
944
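	/* broadcast a STOP or RESET command to every module on the bus */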
945	if (howto & RB_HALT) {
946		printf("System halted!\n");
947		__asm __volatile("stwas %0, 0(%1)"
948		    :: "r" (CMD_STOP), "r" (LBCAST_ADDR + iomod_command));
949	} else {
950		printf("rebooting...");
951		DELAY(1000000);
952		__asm __volatile("stwas %0, 0(%1)"
953		    :: "r" (CMD_RESET), "r" (LBCAST_ADDR + iomod_command));
954	}
955
956	for (;;); /* loop while the bus reset is coming up */
957	/* NOTREACHED */
958}
959
960u_long	dumpmag = 0x8fca0101;	/* magic number */
961int	dumpsize = 0;		/* pages */
962long	dumplo = 0;		/* blocks */
963
964/*
965 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
966 */
967int
968cpu_dumpsize()
969{
970	int size;
971
972	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
973	if (roundup(size, dbtob(1)) != dbtob(1))
974		return -1;
975
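	/* the MD dump headers fit into a single disk block */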
976	return 1;
977}
978
979/*
980 * Called from HPMC handler in locore
981 */
982void
983hpmc_dump()
984{
985
986}
987
988int
989cpu_dump()
990{
991	long buf[dbtob(1) / sizeof (long)];
992	kcore_seg_t	*segp;
993	cpu_kcore_hdr_t	*cpuhdrp;
994
995	segp = (kcore_seg_t *)buf;
996	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];
997
998	/*
999	 * Generate a segment header.
1000	 */
1001	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1002	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
1003
1004	/*
1005	 * Add the machine-dependent header info
1006	 */
1007	/* nothing for now */
1008
1009	return (bdevsw[major(dumpdev)].d_dump)
1010	    (dumpdev, dumplo, (caddr_t)buf, dbtob(1));
1011}
1012
1013/*
1014 * Dump the kernel's image to the swap partition.
1015 */
1016#define	BYTES_PER_DUMP	NBPG
1017
1018void
1019dumpsys()
1020{
1021	int psize, bytes, i, n;
1022	register caddr_t maddr;
1023	register daddr_t blkno;
1024	register int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
1025	register int error;
1026
1027	/* Save registers
1028	savectx(&dumppcb); */
1029
1030	if (dumpsize == 0)
1031		dumpconf();
1032	if (dumplo <= 0) {
1033		printf("\ndump to dev %x not possible\n", dumpdev);
1034		return;
1035	}
1036	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);
1037
1038	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1039	printf("dump ");
1040	if (psize == -1) {
1041		printf("area unavailable\n");
1042		return;
1043	}
1044
1045	if (!(error = cpu_dump())) {
1046
1047		bytes = ctob(physmem);
1048		maddr = NULL;
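		/* the MD headers written by cpu_dump() occupy the first block(s) */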
1049		blkno = dumplo + cpu_dumpsize();
1050		dump = bdevsw[major(dumpdev)].d_dump;
1051		/* TODO block map the whole physical memory */
1052		for (i = 0; i < bytes; i += n) {
1053
1054			/* Print out how many MB remain to be dumped. */
1055			n = bytes - i;
1056			if (n && (n % (1024*1024)) == 0)
1057				printf("%d ", n / (1024 * 1024));
1058
1059			/* Limit size for next transfer. */
1060
1061			if (n > BYTES_PER_DUMP)
1062				n = BYTES_PER_DUMP;
1063
1064			if ((error = (*dump)(dumpdev, blkno, maddr, n)))
1065				break;
1066			maddr += n;
1067			blkno += btodb(n);
1068		}
1069	}
1070
1071	switch (error) {
1072	case ENXIO:	printf("device bad\n");			break;
1073	case EFAULT:	printf("device not ready\n");		break;
1074	case EINVAL:	printf("area improper\n");		break;
1075	case EIO:	printf("i/o error\n");			break;
1076	case EINTR:	printf("aborted from console\n");	break;
1077	case 0:		printf("succeeded\n");			break;
1078	default:	printf("error %d\n", error);		break;
1079	}
1080}
1081
1082/* bcopy(), error on fault */
1083int
1084kcopy(from, to, size)
1085	const void *from;
1086	void *to;
1087	size_t size;
1088{
1089	register u_int oldh = curproc->p_addr->u_pcb.pcb_onfault;
1090
1091	curproc->p_addr->u_pcb.pcb_onfault = (u_int)&copy_on_fault;
1092	bcopy(from, to, size);
1093	curproc->p_addr->u_pcb.pcb_onfault = oldh;
1094
1095	return 0;
1096}
1097
1098int
1099copystr(src, dst, size, lenp)
1100	const void *src;
1101	void *dst;
1102	size_t size;
1103	size_t *lenp;
1104{
1105	return spstrcpy(HPPA_SID_KERNEL, src, HPPA_SID_KERNEL, dst, size, lenp);
1106}
1107
1108int
1109copyinstr(src, dst, size, lenp)
1110	const void *src;
1111	void *dst;
1112	size_t size;
1113	size_t *lenp;
1114{
1115	return spstrcpy(curproc->p_addr->u_pcb.pcb_space, src,
1116	    HPPA_SID_KERNEL, dst, size, lenp);
1117}
1118
1119
1120int
1121copyoutstr(src, dst, size, lenp)
1122	const void *src;
1123	void *dst;
1124	size_t size;
1125	size_t *lenp;
1126{
1127	return spstrcpy(HPPA_SID_KERNEL, src,
1128	    curproc->p_addr->u_pcb.pcb_space, dst, size, lenp);
1129}
1130
1131
1132int
1133copyin(src, dst, size)
1134	const void *src;
1135	void *dst;
1136	size_t size;
1137{
1138	return spcopy(curproc->p_addr->u_pcb.pcb_space, src,
1139	    HPPA_SID_KERNEL, dst, size);
1140}
1141
1142int
1143copyout(src, dst, size)
1144	const void *src;
1145	void *dst;
1146	size_t size;
1147{
1148	return spcopy(HPPA_SID_KERNEL, src,
1149	    curproc->p_addr->u_pcb.pcb_space, dst, size);
1150}
1151
1152/*
1153 * Set registers on exec.
1154 */
1155void
1156setregs(p, pack, stack, retval)
1157	register struct proc *p;
1158	struct exec_package *pack;
1159	u_long stack;
1160	register_t *retval;
1161{
1162	register struct trapframe *tf = p->p_md.md_regs;
1163	/* register struct pcb *pcb = &p->p_addr->u_pcb; */
1164#ifdef DEBUG
1165	/*extern int pmapdebug;*/
1166	/*pmapdebug = 13;*/
1167	printf("setregs(%p, %p, %x, %p), ep=%x, cr30=%x\n",
1168	    p, pack, stack, retval, pack->ep_entry, tf->tf_cr30);
1169#endif
1170
1171	tf->tf_iioq_tail = 4 +
1172	    (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER);
1173	tf->tf_rp = 0;
1174	tf->tf_arg0 = (u_long)PS_STRINGS;
1175	tf->tf_arg1 = tf->tf_arg2 = 0; /* XXX dynload stuff */
1176
1177	/* setup terminal stack frame */
1178	stack += HPPA_FRAME_SIZE;
1179	suword((caddr_t)(stack + HPPA_FRAME_PSP), 0);
1180	tf->tf_sp = stack;
1181
1182	retval[1] = 0;
1183}
1184
1185/*
1186 * Send an interrupt to process.
1187 */
1188void
1189sendsig(catcher, sig, mask, code, type, val)
1190	sig_t catcher;
1191	int sig, mask;
1192	u_long code;
1193	int type;
1194	union sigval val;
1195{
1196	struct proc *p = curproc;
1197	struct trapframe sf, *tf = p->p_md.md_regs;
1198	register_t sp = tf->tf_sp;
1199
1200#ifdef DEBUG
1201	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1202		printf("sendsig: %s[%d] sig %d catcher %p\n",
1203		    p->p_comm, p->p_pid, sig, catcher);
1204#endif
1205
1206	sf = *tf;
1207	/* TODO send signal */
1208
1209	if (copyout(&sf, (void *)sp, sizeof(sf)))
1210		sigexit(p, SIGILL);
1211}
1212
1213int
1214sys_sigreturn(p, v, retval)
1215	struct proc *p;
1216	void *v;
1217	register_t *retval;
1218{
1219	/* TODO sigreturn */
1220	return EINVAL;
1221}
1222
1223/*
1224 * machine dependent system variables.
1225 */
1226int
1227cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1228	int *name;
1229	u_int namelen;
1230	void *oldp;
1231	size_t *oldlenp;
1232	void *newp;
1233	size_t newlen;
1234	struct proc *p;
1235{
1236	dev_t consdev;
1237	/* all sysctl names at this level are terminal */
1238	if (namelen != 1)
1239		return (ENOTDIR);	/* overloaded */
1240	switch (name[0]) {
1241	case CPU_CONSDEV:
1242		if (cn_tab != NULL)
1243			consdev = cn_tab->cn_dev;
1244		else
1245			consdev = NODEV;
1246		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1247		    sizeof consdev));
1248	default:
1249		return (EOPNOTSUPP);
1250	}
1251	/* NOTREACHED */
1252}
1253
1254
1255/*
1256 * consinit:
1257 * initialize the system console.
1258 */
1259void
1260consinit()
1261{
1262	static int initted;
1263
1264	if (!initted) {
1265		initted++;
1266		cninit();
1267	}
1268}
1269