/*	$OpenBSD: machdep.c,v 1.104 2003/05/11 19:41:09 deraadt Exp $	*/

/*
 * Copyright (c) 1999-2002 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Michael Shalayeff.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/timeout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/sysctl.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/extent.h>
#ifdef SYSVMSG
#include <sys/msg.h>
#endif

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <dev/cons.h>

#include <machine/pdc.h>
#include <machine/iomod.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/cpufunc.h>
#include <machine/autoconf.h>
#include <machine/kcore.h>

#ifdef COMPAT_HPUX
#include <compat/hpux/hpux.h>
#endif

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#include <hppa/dev/cpudevs.h>

/*
 * Patchable buffer cache parameters
 */
#ifdef NBUF
int nbuf = NBUF;
#else
int nbuf = 0;
#endif

#ifndef BUFCACHEPERCENT
#define BUFCACHEPERCENT 10
#endif /* BUFCACHEPERCENT */

#ifdef BUFPAGES
int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif
int bufcachepercent = BUFCACHEPERCENT;

/*
 * Different kinds of flags used throughout the kernel.
 */
int cold = 1;			/* unset when engine is up to go */
extern int msgbufmapped;	/* set when safe to use msgbuf */

/*
 * cache configuration, for most machines is the same
 * numbers, so it makes sense to do defines w/ numbers depending
 * on configured cpu types in the kernel
 */
int icache_stride, icache_line_mask;
int dcache_stride, dcache_line_mask;

/*
 * things to not kill
 */
volatile u_int8_t *machine_ledaddr;
int machine_ledword, machine_leds;

/*
 * CPU params (should be the same for all cpus in the system)
 */
struct pdc_cache pdc_cache PDC_ALIGNMENT;
struct pdc_btlb pdc_btlb PDC_ALIGNMENT;

	/* w/ a little deviation should be the same for all installed cpus */
u_int	cpu_itmr, cpu_ticksnum, cpu_ticksdenom, cpu_hzticks;

	/* exported info */
char	machine[] = MACHINE_ARCH;
char	cpu_model[128];
enum hppa_cpu_type cpu_type;
const char *cpu_typename;
#ifdef COMPAT_HPUX
int	cpu_model_hpux;	/* contains HPUX_SYSCONF_CPU* kind of value */
#endif

/*
 * exported methods for cpus
 */
int (*cpu_desidhash)(void);
int (*cpu_hpt_init)(vaddr_t hpt, vsize_t hptsize);
int (*cpu_ibtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	    vsize_t sz, u_int prot);
int (*cpu_dbtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	    vsize_t sz, u_int prot);

dev_t	bootdev;
int	totalphysmem, resvmem, physmem, esym;
paddr_t	avail_end;

/*
 * Things for MI glue to stick on.
 */
struct user *proc0paddr;
long mem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(32) / sizeof(long)];
struct extent *hppa_ex;

struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;
/* Virtual page frame for /dev/mem (see mem.c) */
vaddr_t vmmap;

void delay_init(void);
static __inline void fall(int, int, int, int, int);
void dumpsys(void);
void hpmc_dump(void);
void hppa_user2frame(struct trapframe *sf, struct trapframe *tf);

/*
 * widely used hardware params
 */
struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
struct pdc_coherence pdc_coherence PDC_ALIGNMENT;
struct pdc_spidb pdc_spidbits PDC_ALIGNMENT;

#ifdef DEBUG
int sigdebug = 0;
pid_t sigpid = 0;
#define SDB_FOLLOW	0x01
#endif

/*
 * Whatever CPU types we support
 */
extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
int iibtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int idbtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_t(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_l(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
    vsize_t sz, u_int prot);
int pbtlb_g(int i);
int hpti_l(vaddr_t, vsize_t);
int hpti_g(vaddr_t, vsize_t);
int desidhash_x(void);
int desidhash_s(void);
int desidhash_t(void);
int desidhash_l(void);
int desidhash_g(void);
const struct hppa_cpu_typed {
	char name[8];
	enum hppa_cpu_type type;
	int  arch;
	int  features;
	int (*desidhash)(void);
	const u_int *itlbh, *itlbnah, *dtlbh, *dtlbnah, *tlbdh;
	int (*dbtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	    vsize_t sz, u_int prot);
	int (*ibtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
	    vsize_t sz, u_int prot);
	int (*btlbprg)(int i);
	int (*hptinit)(vaddr_t hpt, vsize_t hptsize);
} cpu_types[] = {
#ifdef HP7000_CPU
	{ "PCXS",   hpcx,  0x10, 0,
	  desidhash_s, itlb_s, itlbna_s, dtlb_s, dtlbna_s, tlbd_s,
	  ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7100_CPU
	{ "PCXT",  hpcxs, 0x11, HPPA_FTRS_BTLBU,
	  desidhash_t, itlb_t, itlbna_t, dtlb_t, dtlbna_t, tlbd_t,
	  ibtlb_g, NULL, pbtlb_g},
#endif
#ifdef HP7200_CPU
/* these seem to support the cpu model pdc call */
/* HOW?	{ "PCXT'", hpcxta,0x11, HPPA_FTRS_BTLBU,
	  desidhash_t, itlb_t, itlbna_l, dtlb_t, dtlbna_t, tlbd_t,
	  ibtlb_g, NULL, pbtlb_g}, */
#endif
#ifdef HP7100LC_CPU
	{ "PCXL",  hpcxl, 0x11, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	  desidhash_l, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
	  ibtlb_g, NULL, pbtlb_g, hpti_l},
#endif
#ifdef HP7300LC_CPU
/* HOW?	{ "PCXL2", hpcxl2,0x11, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	  desidhash_l, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
	  ibtlb_g, NULL, pbtlb_g, hpti_l}, */
#endif
#ifdef HP8000_CPU
	{ "PCXU",  hpcxu, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	  desidhash_g, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
	  ibtlb_g, NULL, pbtlb_g, hpti_g},
#endif
#ifdef HP8200_CPU
/* HOW?	{ "PCXU2", hpcxu2,0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	  desidhash_g, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
	  ibtlb_g, NULL, pbtlb_g, hpti_g}, */
#endif
#ifdef HP8500_CPU
/* HOW?	{ "PCXW",  hpcxw, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	  desidhash_g, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
	  ibtlb_g, NULL, pbtlb_g, hpti_g}, */
#endif
#ifdef HP8600_CPU
/* HOW?	{ "PCXW+", hpcxw, 0x20, HPPA_FTRS_W32B|HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
	  desidhash_g, itlb_l, itlbna_l, dtlb_l, dtlbna_l, tlbd_l,
	  ibtlb_g, NULL, pbtlb_g, hpti_g}, */
#endif
	{ "", 0 }
};

void
hppa_init(start)
	paddr_t start;
{
	struct pdc_model pdc_model PDC_ALIGNMENT;
	extern int kernel_text;
	vaddr_t v, v1;
	int error, cpu_features = 0;

	pdc_init();	/* init PDC iface, so we can call em easy */

	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
	delay_init();	/* calculate cpu clock ratio */

	/* cache parameters */
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
	    &pdc_cache)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_CACHE error %d\n", error);
#endif
	}

	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
	dcache_stride = pdc_cache.dc_stride;
	icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1;
	icache_stride = pdc_cache.ic_stride;

	/* cache coherence params (probably available for 8k only) */
	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_SETCS,
	    &pdc_coherence, 1, 1, 1, 1);
#ifdef DEBUG
	printf ("PDC_CACHE_SETCS: %d, %d, %d, %d (%d)\n",
	    pdc_coherence.ia_cst, pdc_coherence.da_cst,
	    pdc_coherence.ita_cst, pdc_coherence.dta_cst, error);
#endif
	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_GETSPIDB,
	    &pdc_spidbits, 0, 0, 0, 0);
	printf("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error);

	/* setup hpmc handler */
	{
		extern u_int hpmc_v[];	/* from locore.s */
		register u_int *p = hpmc_v;
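		/*
		 * The HPMC slot of the interruption vector is eight words;
		 * p[5] below is set to the negated sum of the other words so
		 * the whole slot sums to zero, which is presumably how
		 * firmware validates it before branching here.  p[6]/p[7]
		 * appear to give the address and length of the OS handler
		 * (hpmc_dump).
		 */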

		if (pdc_call((iodcio_t)pdc, 0, PDC_INSTR, PDC_INSTR_DFLT, p))
			*p = 0x08000240;

		p[6] = (u_int)&hpmc_dump;
		p[7] = 32;
		p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);
	}

	{
		extern u_int hppa_toc[], hppa_toc_end[];
		register u_int cksum, *p;

		for (cksum = 0, p = hppa_toc; p < hppa_toc_end; p++)
			cksum += *p;

		*p = cksum;
		PAGE0->ivec_toc = (int (*)(void))hppa_toc;
		PAGE0->ivec_toclen = (hppa_toc_end - hppa_toc + 1) * 4;
	}

	{
		extern u_int hppa_pfr[], hppa_pfr_end[];
		register u_int cksum, *p;

		for (cksum = 0, p = hppa_pfr; p < hppa_pfr_end; p++)
			cksum += *p;

		*p = cksum;
		PAGE0->ivec_mempf = (int (*)(void))hppa_pfr;
		PAGE0->ivec_mempflen = (hppa_pfr_end - hppa_pfr + 1) * 4;
	}

	/* may the scientific guessing begin */
	cpu_features = 0;

	/* identify system type */
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_INFO,
	    &pdc_model)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_MODEL error %d\n", error);
#endif
		pdc_model.hvers = 0;
	}

	/* BTLB params */
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
	    PDC_BTLB_DEFAULT, &pdc_btlb)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_BTLB error %d", error);
#endif
	} else {
#ifdef BTLBDEBUG
		printf("btlb info: minsz=%d, maxsz=%d\n",
		    pdc_btlb.min_size, pdc_btlb.max_size);
		printf("btlb fixed: i=%d, d=%d, c=%d\n",
		    pdc_btlb.finfo.num_i,
		    pdc_btlb.finfo.num_d,
		    pdc_btlb.finfo.num_c);
		printf("btlb varbl: i=%d, d=%d, c=%d\n",
		    pdc_btlb.vinfo.num_i,
		    pdc_btlb.vinfo.num_d,
		    pdc_btlb.vinfo.num_c);
#endif /* BTLBDEBUG */
		/* purge TLBs and caches */
		if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
		    PDC_BTLB_PURGE_ALL) < 0)
			printf("WARNING: BTLB purge failed\n");

		if (pdc_btlb.finfo.num_c)
			cpu_features |= HPPA_FTRS_BTLBU;
	}

	ptlball();
	fcacheall();

	totalphysmem = btoc(PAGE0->imm_max_mem);
	resvmem = btoc(((vaddr_t)&kernel_text));
	avail_end = ctob(totalphysmem);

#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
	if (!pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
	    pdc_hwtlb.min_size && pdc_hwtlb.max_size)
		cpu_features |= HPPA_FTRS_HVT;
	else {
		printf("WARNING: no HPT support, fine!\n");
		pmap_hptsize = 0;
	}
#endif

	/*
	 * Deal w/ CPU now
	 */
	{
		const struct hppa_cpu_typed *p;

		for (p = cpu_types;
		    p->arch && p->features != cpu_features; p++);

		if (!p->arch) {
			printf("WARNING: UNKNOWN CPU TYPE; GOOD LUCK (%x)\n",
			    cpu_features);
			p = cpu_types;
		}

		{
			/*
			 * Ptrs to various tlb handlers, to be filled
			 * based on cpu features.
			 * from locore.S
			 */
			extern u_int trap_ep_T_TLB_DIRTY[];
			extern u_int trap_ep_T_DTLBMISS[];
			extern u_int trap_ep_T_DTLBMISSNA[];
			extern u_int trap_ep_T_ITLBMISS[];
			extern u_int trap_ep_T_ITLBMISSNA[];

			cpu_type = p->type;
			cpu_typename = p->name;
			cpu_ibtlb_ins = p->ibtlbins;
			cpu_dbtlb_ins = p->dbtlbins;
			cpu_hpt_init = p->hptinit;
			cpu_desidhash = p->desidhash;

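			/*
			 * Each handler table entry appears to be a two-word
			 * instruction stub; LDILDO (presumably ldil/ldo)
			 * copies it over the generic trap entry point
			 * assembled in locore.S.
			 */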
#define	LDILDO(t,f) ((t)[0] = (f)[0], (t)[1] = (f)[1])
			LDILDO(trap_ep_T_TLB_DIRTY , p->tlbdh);
			LDILDO(trap_ep_T_DTLBMISS  , p->dtlbh);
			LDILDO(trap_ep_T_DTLBMISSNA, p->dtlbnah);
			LDILDO(trap_ep_T_ITLBMISS  , p->itlbh);
			LDILDO(trap_ep_T_ITLBMISSNA, p->itlbnah);
#undef LDILDO
		}
	}

	{
		const char *p, *q;
		char buf[32];
		int lev, hv;

		lev = 0xa + (*cpu_desidhash)();
		hv = pdc_model.hvers >> 4;
		if (!hv) {
			p = "(UNKNOWN)";
			q = lev == 0xa? "1.0" : "1.1";
		} else {
			p = hppa_mod_info(HPPA_TYPE_BOARD, hv);
			if (!p) {
				snprintf(buf, sizeof buf, "(UNKNOWN 0x%x)", hv);
				p = buf;
			}

			switch (pdc_model.arch_rev) {
			default:
			case 0:
				q = "1.0";
#ifdef COMPAT_HPUX
				cpu_model_hpux = HPUX_SYSCONF_CPUPA10;
#endif
				break;
			case 4:
				q = "1.1";
#ifdef COMPAT_HPUX
				cpu_model_hpux = HPUX_SYSCONF_CPUPA11;
#endif
				/* this one is just a 100MHz pcxl */
				if (lev == 0x10)
					lev = 0xc;
				/* this one is a pcxl2 */
				if (lev == 0x16)
					lev = 0xe;
				break;
			case 8:
				q = "2.0";
#ifdef COMPAT_HPUX
				cpu_model_hpux = HPUX_SYSCONF_CPUPA20;
#endif
				break;
			}
		}

		snprintf(cpu_model, sizeof cpu_model,
		    "HP 9000/%s PA-RISC %s%x", p, q, lev);
	}

	/* we hope this won't fail */
	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)mem_ex_storage, sizeof(mem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);
	if (extent_alloc_region(hppa_ex, 0, (vaddr_t)PAGE0->imm_max_mem,
	    EX_NOWAIT))
		panic("cannot reserve main memory");

	/*
	 * Now allocate kernel dynamic variables
	 */

	/* buffer cache parameters */
	if (bufpages == 0)
		bufpages = totalphysmem / 100 *
		    (totalphysmem <= 0x1000? 5 : bufcachepercent);
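	/*
	 * That is, 5% of RAM on machines with at most 0x1000 pages
	 * (16MB with 4KB pages), bufcachepercent (10% by default)
	 * otherwise.
	 */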

	if (nbuf == 0)
		nbuf = bufpages < 16? 16 : bufpages;

	/* Restrict to at most 70% filled kvm */
	if (nbuf * MAXBSIZE >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10)
		nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    MAXBSIZE * 7 / 10;

	/* More buffer pages than fit into the buffers is senseless. */
	if (bufpages > nbuf * MAXBSIZE / PAGE_SIZE)
		bufpages = nbuf * MAXBSIZE / PAGE_SIZE;

	v1 = v = hppa_round_page(start);
#define valloc(name, type, num) (name) = (type *)v; v = (vaddr_t)((name)+(num))
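	/*
	 * Carve the dynamic kernel structures out of the space right
	 * after the kernel image (`start'), advancing `v' past each
	 * allocation; the whole range is zeroed further below.
	 */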

	valloc(buf, struct buf, nbuf);

#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif
#undef valloc

	v = hppa_round_page(v);
	bzero ((void *)v1, (v - v1));

	msgbufp = (struct msgbuf *)v;
	v += round_page(MSGBUFSIZE);
	bzero(msgbufp, MSGBUFSIZE);

	/* sets physmem */
	pmap_bootstrap(v);

	msgbufmapped = 1;
	initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

	/* locate coprocessors and SFUs */
	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
	    &pdc_coproc)) < 0)
		printf("WARNING: PDC_COPROC error %d\n", error);
	else {
		extern u_int fpu_enable;
#ifdef DEBUG
		printf("pdc_coproc: %x, %x\n", pdc_coproc.ccr_enable,
		    pdc_coproc.ccr_present);
#endif
		fpu_enable = pdc_coproc.ccr_enable & CCR_MASK;
	}

	/* they say PDC_COPROC might turn fault light on */
	pdc_call((iodcio_t)pdc, 0, PDC_CHASSIS, PDC_CHASSIS_DISP,
	    PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0);

#ifdef DDB
	ddb_init();
#endif
	fcacheall();
}

void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	int i, base, residual;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/*
	 * i won't understand a friend of mine,
	 * who sat in a room full of artificial ice,
	 * fogging the air w/ humid cries --
	 *	WELCOME TO SUMMER!
	 */
	printf(version);

	printf("%s\n", cpu_model);
	printf("real mem = %d (%d reserved for PROM, %d used by OpenBSD)\n",
	    ctob(totalphysmem), ctob(resvmem), ctob(physmem));

	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, &minaddr, round_page(size),
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0)))
		panic("cpu_startup: cannot allocate VM for buffers");
	buffers = (caddr_t)minaddr;
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vaddr_t curbuf;
		int cbpgs;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);

		for (cbpgs = base + (i < residual? 1 : 0); cbpgs--; ) {
			struct vm_page *pg;

			if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_RW);
			curbuf += PAGE_SIZE;
		}
	}

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %ld\n", ptoa(uvmexp.free));
	printf("using %d buffers containing %d bytes of memory\n",
	    nbuf, bufpages * PAGE_SIZE);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vmmap = uvm_km_valloc_wait(kernel_map, NBPG);

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}

/*
 * compute cpu clock ratio such as:
 *	cpu_ticksnum / cpu_ticksdenom = t + delta
 *	delta -> 0
 */
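/*
 * Here the ratio is ITMR ticks per microsecond, derived from
 * PAGE0->mem_10msec (ticks per 10ms).  The loop below tries
 * denominators 1..999 and keeps the best approximation seen so far;
 * e.g. a hypothetical part with mem_10msec == 1000000 settles on
 * 100/1 with no error.
 */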
void
delay_init(void)
{
	register u_int num, denom, delta, mdelta;

	mdelta = UINT_MAX;
	for (denom = 1; denom < 1000; denom++) {
		num = (PAGE0->mem_10msec * denom) / 10000;
		delta = num * 10000 / denom - PAGE0->mem_10msec;
		if (!delta) {
			cpu_ticksdenom = denom;
			cpu_ticksnum = num;
			break;
		} else if (delta < mdelta) {
			cpu_ticksdenom = denom;
			cpu_ticksnum = num;
		}
	}
}

void
delay(us)
	u_int us;
{
	register u_int start, end, n;

	mfctl(CR_ITMR, start);
	while (us) {
		n = min(1000, us);
		end = start + n * cpu_ticksnum / cpu_ticksdenom;

		/* N.B. Interval Timer may wrap around */
		if (end < start)
			do
				mfctl(CR_ITMR, start);
			while (start > end);

		do
			mfctl(CR_ITMR, start);
		while (start < end);

		us -= n;
	}
}

void
microtime(struct timeval *tv)
{
	u_int itmr;
	int s;
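	/*
	 * Interpolate within the current clock tick: the ITMR ticks
	 * elapsed since cpu_itmr (presumably recorded at the last
	 * hardclock) are converted back to microseconds using the
	 * ratio computed in delay_init().
	 */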

	s = splhigh();
	tv->tv_sec  = time.tv_sec;
	tv->tv_usec = time.tv_usec;

	mfctl(CR_ITMR, itmr);
	itmr -= cpu_itmr;
	splx(s);

	tv->tv_usec += itmr * cpu_ticksdenom / cpu_ticksnum;
	if (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}


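/*
 * Flush one whole cache using the geometry reported by PDC_CACHE:
 * for each of c_count sets starting at c_base, stepping by c_stride,
 * issue c_loop flushes; data != 0 selects the data cache (fdce),
 * otherwise the instruction cache (fice).
 */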
static __inline void
fall(c_base, c_count, c_loop, c_stride, data)
	int c_base, c_count, c_loop, c_stride, data;
{
	register int loop;

	for (; c_count--; c_base += c_stride)
		for (loop = c_loop; loop--; )
			if (data)
				fdce(0, c_base);
			else
				fice(0, c_base);
}

void
fcacheall(void)
{
	/*
	 * Flush the instruction, then data cache.
	 */
	fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop,
	    pdc_cache.ic_stride, 0);
	sync_caches();
	fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop,
	    pdc_cache.dc_stride, 1);
	sync_caches();
}

void
ptlball(void)
{
	register pa_space_t sp;
	register int i, j, k;

	/* instruction TLB */
	sp = pdc_cache.it_sp_base;
	for (i = 0; i < pdc_cache.it_sp_count; i++) {
		register vaddr_t off = pdc_cache.it_off_base;
		for (j = 0; j < pdc_cache.it_off_count; j++) {
			for (k = 0; k < pdc_cache.it_loop; k++)
				pitlbe(sp, off);
			off += pdc_cache.it_off_stride;
		}
		sp += pdc_cache.it_sp_stride;
	}

	/* data TLB */
	sp = pdc_cache.dt_sp_base;
	for (i = 0; i < pdc_cache.dt_sp_count; i++) {
		register vaddr_t off = pdc_cache.dt_off_base;
		for (j = 0; j < pdc_cache.dt_off_count; j++) {
			for (k = 0; k < pdc_cache.dt_loop; k++)
				pdtlbe(sp, off);
			off += pdc_cache.dt_off_stride;
		}
		sp += pdc_cache.dt_sp_stride;
	}
}

int
desidhash_g(void)
{
	/* TODO call PDC to disable SID hashing in the cache index */

	return 0;
}

int
hpti_g(hpt, hptsize)
	vaddr_t hpt;
	vsize_t hptsize;
{
	return pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_CONFIG,
	    &pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE);
}

int
pbtlb_g(i)
	int i;
{
	return -1;
}

int
ibtlb_g(i, sp, va, pa, sz, prot)
	int i;
	pa_space_t sp;
	vaddr_t va;
	paddr_t pa;
	vsize_t sz;
	u_int prot;
{
	int error;

	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_INSERT,
	    sp, va, pa, sz, prot, i)) < 0) {
#ifdef BTLBDEBUG
		printf("WARNING: BTLB insert failed (%d)\n", error);
#endif
	}
	return error;
}

int
btlb_insert(space, va, pa, lenp, prot)
	pa_space_t space;
	vaddr_t va;
	paddr_t pa;
	vsize_t *lenp;
	u_int prot;
{
	static u_int32_t mask;
	register vsize_t len;
	register int error, i;
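	/*
	 * `mask' is a bitmap of BTLB slots already handed out;
	 * ffs(~mask) - 1 below picks the lowest free one.  The length
	 * is rounded up to the next power-of-two multiple of the
	 * minimum BTLB entry size, then converted back to pages.
	 */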

	/* align size */
	for (len = pdc_btlb.min_size << PGSHIFT; len < *lenp; len <<= 1);
	len >>= PGSHIFT;
	i = ffs(~mask) - 1;
	if (len > pdc_btlb.max_size || i < 0) {
#ifdef BTLBDEBUG
		printf("btlb_insert: too big (%u < %u < %u)\n",
		    pdc_btlb.min_size, len, pdc_btlb.max_size);
#endif
		return -(ENOMEM);
	}

	mask |= 1 << i;
	pa >>= PGSHIFT;
	va >>= PGSHIFT;
	/* check address alignment */
	if (pa & (len - 1))
		printf("WARNING: BTLB address misaligned pa=0x%x, len=0x%x\n",
		    pa, len);

	/* ensure IO space is uncached */
	if ((pa & (HPPA_IOBEGIN >> PGSHIFT)) == (HPPA_IOBEGIN >> PGSHIFT))
		prot |= TLB_UNCACHABLE;

#ifdef BTLBDEBUG
	printf("btlb_insert(%d): %x:%x=%x[%x,%x]\n", i, space, va, pa, len, prot);
#endif
	if ((error = (*cpu_dbtlb_ins)(i, space, va, pa, len, prot)) < 0)
		return -(EINVAL);
	*lenp = len << PGSHIFT;

	return i;
}

int waittime = -1;

void
boot(howto)
	int howto;
{
	/* If system is cold, just halt. */
	if (cold)
		howto |= RB_HALT;
	else {

		boothowto = howto | (boothowto & RB_HALT);

		if (!(howto & RB_NOSYNC)) {
			waittime = 0;
			vfs_shutdown();
			/*
			 * If we've been adjusting the clock, the todr
			 * will be out of synch; adjust it now unless
			 * the system was sitting in ddb.
			 */
			if ((howto & RB_TIMEBAD) == 0)
				resettodr();
			else
				printf("WARNING: not updating battery clock\n");
		}

		/* XXX probably save howto into stable storage */

		splhigh();

		if (howto & RB_DUMP)
			dumpsys();

		doshutdownhooks();
	}

	/* in case we came on powerfail interrupt */
	if (cold_hook)
		(*cold_hook)(HPPA_COLD_COLD);

	if (howto & RB_HALT) {
		if (howto & RB_POWERDOWN && cold_hook) {
			printf("Powering off...");
			DELAY(1000000);
			(*cold_hook)(HPPA_COLD_OFF);
			DELAY(1000000);
		}

		printf("System halted!\n");
		DELAY(1000000);
		__asm __volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_STOP), "r" (LBCAST_ADDR + iomod_command));
	} else {
		printf("rebooting...");
		DELAY(1000000);
		__asm __volatile(".export hppa_reset, entry\n\t"
		    ".label hppa_reset");
		__asm __volatile("stwas %0, 0(%1)"
		    :: "r" (CMD_RESET), "r" (LBCAST_ADDR + iomod_command));
	}

	for(;;); /* loop while bus reset is coming up */
	/* NOTREACHED */
}

u_long	dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 */
int
cpu_dumpsize(void)
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return -1;

	return 1;
}

/*
 * Called from HPMC handler in locore
 */
void
hpmc_dump(void)
{
	printf("HPMC\n");

	cold = 0;
	boot(RB_NOSYNC);
}

int
cpu_dump(void)
{
	long buf[dbtob(1) / sizeof (long)];
	kcore_seg_t	*segp;
	cpu_kcore_hdr_t	*cpuhdrp;
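	/*
	 * The MD dump header is a single disk block (dbtob(1) bytes):
	 * a kcore segment header followed by the (currently empty)
	 * cpu_kcore_hdr_t; cpu_dumpsize() guarantees this fits.
	 */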

	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info
	 */
	/* nothing for now */

	return (bdevsw[major(dumpdev)].d_dump)
	    (dumpdev, dumplo, (caddr_t)buf, dbtob(1));
}

/*
 * Dump the kernel's image to the swap partition.
 */
#define	BYTES_PER_DUMP	NBPG

void
dumpsys(void)
{
	int psize, bytes, i, n;
	register caddr_t maddr;
	register daddr_t blkno;
	register int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	register int error;

	/* Save registers
	savectx(&dumppcb); */

	if (dumpsize == 0)
		dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %x not possible\n", dumpdev);
		return;
	}
	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

	if (!(error = cpu_dump())) {

		bytes = ctob(totalphysmem);
		maddr = NULL;
		blkno = dumplo + cpu_dumpsize();
		dump = bdevsw[major(dumpdev)].d_dump;
		/* TODO block map the whole physical memory */
		for (i = 0; i < bytes; i += n) {

			/* Print out how many MBs we are to go. */
			n = bytes - i;
			if (n && (n % (1024*1024)) == 0)
				printf("%d ", n / (1024 * 1024));

			/* Limit size for next transfer. */

			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			if ((error = (*dump)(dumpdev, blkno, maddr, n)))
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	switch (error) {
	case ENXIO:	printf("device bad\n");			break;
	case EFAULT:	printf("device not ready\n");		break;
	case EINVAL:	printf("area improper\n");		break;
	case EIO:	printf("i/o error\n");			break;
	case EINTR:	printf("aborted from console\n");	break;
	case 0:		printf("succeeded\n");			break;
	default:	printf("error %d\n", error);		break;
	}
}

/* bcopy(), error on fault */
int
kcopy(from, to, size)
	const void *from;
	void *to;
	size_t size;
{
	return spcopy(HPPA_SID_KERNEL, from, HPPA_SID_KERNEL, to, size);
}

int
copystr(src, dst, size, lenp)
	const void *src;
	void *dst;
	size_t size;
	size_t *lenp;
{
	return spstrcpy(HPPA_SID_KERNEL, src, HPPA_SID_KERNEL, dst, size, lenp);
}

int
copyinstr(src, dst, size, lenp)
	const void *src;
	void *dst;
	size_t size;
	size_t *lenp;
{
	return spstrcpy(curproc->p_addr->u_pcb.pcb_space, src,
	    HPPA_SID_KERNEL, dst, size, lenp);
}


int
copyoutstr(src, dst, size, lenp)
	const void *src;
	void *dst;
	size_t size;
	size_t *lenp;
{
	return spstrcpy(HPPA_SID_KERNEL, src,
	    curproc->p_addr->u_pcb.pcb_space, dst, size, lenp);
}


int
copyin(src, dst, size)
	const void *src;
	void *dst;
	size_t size;
{
	return spcopy(curproc->p_addr->u_pcb.pcb_space, src,
	    HPPA_SID_KERNEL, dst, size);
}

int
copyout(src, dst, size)
	const void *src;
	void *dst;
	size_t size;
{
	return spcopy(HPPA_SID_KERNEL, src,
	    curproc->p_addr->u_pcb.pcb_space, dst, size);
}

/*
 * Set registers on exec.
 */
void
setregs(p, pack, stack, retval)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
	register_t *retval;
{
	extern paddr_t fpu_curpcb;	/* from locore.S */
	struct trapframe *tf = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;
	register_t zero;
#ifdef DEBUG
	/*extern int pmapdebug;*/
	/*pmapdebug = 13;
	printf("setregs(%p, %p, %x, %p), ep=%x, cr30=%x\n",
	    p, pack, stack, retval, pack->ep_entry, tf->tf_cr30);
	*/
#endif
	tf->tf_flags = TFF_SYS|TFF_LAST;
	tf->tf_iioq_tail = 4 +
	    (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER);
	tf->tf_rp = 0;
	tf->tf_arg0 = (u_long)PS_STRINGS;
	tf->tf_arg1 = tf->tf_arg2 = 0; /* XXX dynload stuff */

	/* setup terminal stack frame */
	stack = (stack + 0x1f) & ~0x1f;
	tf->tf_r3 = stack;
	tf->tf_sp = stack += HPPA_FRAME_SIZE;
	zero = 0;
	copyout(&zero, (caddr_t)(stack - HPPA_FRAME_SIZE), sizeof(register_t));
	copyout(&zero, (caddr_t)(stack + HPPA_FRAME_CRP), sizeof(register_t));

	/* reset any of the pending FPU exceptions */
	pcb->pcb_fpregs[0] = ((u_int64_t)HPPA_FPU_INIT) << 32;
	pcb->pcb_fpregs[1] = 0;
	pcb->pcb_fpregs[2] = 0;
	pcb->pcb_fpregs[3] = 0;
	fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb->pcb_fpregs, 8 * 4);
	if (tf->tf_cr30 == fpu_curpcb) {
		fpu_curpcb = 0;
		/* force an fpu ctxsw, we'll not be hugged by the cpu_switch */
		mtctl(0, CR_CCR);
	}

	retval[1] = 0;
}

/*
 * Send an interrupt to process.
 */
void
sendsig(catcher, sig, mask, code, type, val)
	sig_t catcher;
	int sig, mask;
	u_long code;
	int type;
	union sigval val;
{
	struct proc *p = curproc;
	struct trapframe *tf = p->p_md.md_regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigcontext ksc, *scp;
	siginfo_t ksi, *sip;
	int sss;
	register_t zero;

#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
		printf("sendsig: %s[%d] sig %d catcher %p\n",
		    p->p_comm, p->p_pid, sig, catcher);
#endif

	ksc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;

	/*
	 * Allocate space for the signal handler context.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !ksc.sc_onstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)psp->ps_sigstk.ss_sp;
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
	} else
		scp = (struct sigcontext *)tf->tf_sp;

	sss = sizeof(*scp);
	sip = NULL;
	if (psp->ps_siginfo & sigmask(sig)) {
		initsiginfo(&ksi, sig, code, type, val);
		sip = (siginfo_t *)(scp + 1);
		if (copyout((caddr_t)&ksi, sip, sizeof(ksi)))
			sigexit(p, SIGILL);
		sss += sizeof(*sip);
	}
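	/*
	 * `sss' counts the bytes carved out above `scp' on the
	 * (upward-growing) stack: the sigcontext itself, the optional
	 * siginfo, and, added below, a standard frame for the handler.
	 */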

	ksc.sc_mask = mask;
	ksc.sc_sp = tf->tf_sp;
	ksc.sc_fp = (register_t)scp + sss;
	ksc.sc_ps = tf->tf_ipsw;
	ksc.sc_pcoqh = tf->tf_iioq_head;
	ksc.sc_pcoqt = tf->tf_iioq_tail;
	bcopy(tf, &ksc.sc_tf, sizeof(ksc.sc_tf));
	if (copyout((caddr_t)&ksc, scp, sizeof(*scp)))
		sigexit(p, SIGILL);

	sss += HPPA_FRAME_SIZE;
	zero = 0;
	if (copyout(&zero, (caddr_t)scp + sss - HPPA_FRAME_SIZE,
	    sizeof(register_t)) ||
	    copyout(&zero, (caddr_t)scp + sss + HPPA_FRAME_CRP,
	    sizeof(register_t)))
		sigexit(p, SIGILL);

#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
		printf("sendsig(%d): sig %d scp %p fp %p sp %x\n",
		    p->p_pid, sig, scp, ksc.sc_fp, ksc.sc_sp);
#endif

	tf->tf_arg0 = sig;
	tf->tf_arg1 = (register_t)sip;
	tf->tf_arg2 = tf->tf_r3 = (register_t)scp;
	tf->tf_arg3 = (register_t)catcher;
	tf->tf_sp = (register_t)scp + sss;
	tf->tf_iioq_head = HPPA_PC_PRIV_USER | p->p_sigcode;
	tf->tf_iioq_tail = tf->tf_iioq_head + 4;
	/* disable tracing in the trapframe */

	/* TODO FPU */

#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
		printf("sendsig(%d): pc %x, catcher %x\n", p->p_pid,
		    tf->tf_iioq_head, tf->tf_arg3);
#endif
}

int
sys_sigreturn(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, ksc;
	struct trapframe *tf = p->p_md.md_regs;
	int error;

	scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
		printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
#endif

	if ((error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc)))
		return (error);

#define PSL_MBS (PSL_C|PSL_Q|PSL_P|PSL_D|PSL_I)
#define PSL_MBZ (PSL_Y|PSL_Z|PSL_S|PSL_X|PSL_M|PSL_R)
	if ((ksc.sc_ps & (PSL_MBS|PSL_MBZ)) != PSL_MBS)
		return (EINVAL);

	if (ksc.sc_onstack)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	p->p_sigmask = ksc.sc_mask &~ sigcantmask;

	hppa_user2frame((struct trapframe *)&ksc.sc_tf, tf);

	tf->tf_sp = ksc.sc_sp;
	tf->tf_iioq_head = ksc.sc_pcoqh | HPPA_PC_PRIV_USER;
	tf->tf_iioq_tail = ksc.sc_pcoqt | HPPA_PC_PRIV_USER;
	tf->tf_ipsw = ksc.sc_ps;

	/* TODO FPU */

#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}

void
hppa_user2frame(sf, tf)
	struct trapframe *sf, *tf;
{
	/* only restore r1-r31, sar */
	tf->tf_t1 = sf->tf_t1;		/* r22 */
	tf->tf_t2 = sf->tf_t2;		/* r21 */
	tf->tf_sp = sf->tf_sp;
	tf->tf_t3 = sf->tf_t3;		/* r20 */

	tf->tf_sar = sf->tf_sar;
	tf->tf_r1 = sf->tf_r1;
	tf->tf_rp = sf->tf_rp;
	tf->tf_r3 = sf->tf_r3;
	tf->tf_r4 = sf->tf_r4;
	tf->tf_r5 = sf->tf_r5;
	tf->tf_r6 = sf->tf_r6;
	tf->tf_r7 = sf->tf_r7;
	tf->tf_r8 = sf->tf_r8;
	tf->tf_r9 = sf->tf_r9;
	tf->tf_r10 = sf->tf_r10;
	tf->tf_r11 = sf->tf_r11;
	tf->tf_r12 = sf->tf_r12;
	tf->tf_r13 = sf->tf_r13;
	tf->tf_r14 = sf->tf_r14;
	tf->tf_r15 = sf->tf_r15;
	tf->tf_r16 = sf->tf_r16;
	tf->tf_r17 = sf->tf_r17;
	tf->tf_r18 = sf->tf_r18;
	tf->tf_t4 = sf->tf_t4;		/* r19 */
	tf->tf_arg3 = sf->tf_arg3;	/* r23 */
	tf->tf_arg2 = sf->tf_arg2;	/* r24 */
	tf->tf_arg1 = sf->tf_arg1;	/* r25 */
	tf->tf_arg0 = sf->tf_arg0;	/* r26 */
	tf->tf_dp = sf->tf_dp;
	tf->tf_ret0 = sf->tf_ret0;
	tf->tf_ret1 = sf->tf_ret1;
	tf->tf_r31 = sf->tf_r31;
}

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	dev_t consdev;
	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */
	switch (name[0]) {
	case CPU_CONSDEV:
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}


/*
 * consinit:
 * initialize the system console.
 */
void
consinit(void)
{
	static int initted;

	if (!initted) {
		initted++;
		cninit();
	}
}
1434