machdep.c revision 1.189
1/*	$OpenBSD: machdep.c,v 1.189 2010/06/27 03:03:48 thib Exp $	*/
2
3/*
4 * Copyright (c) 1999-2003 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/param.h>
30#include <sys/systm.h>
31#include <sys/signalvar.h>
32#include <sys/kernel.h>
33#include <sys/proc.h>
34#include <sys/buf.h>
35#include <sys/reboot.h>
36#include <sys/device.h>
37#include <sys/conf.h>
38#include <sys/file.h>
39#include <sys/timeout.h>
40#include <sys/malloc.h>
41#include <sys/mbuf.h>
42#include <sys/msgbuf.h>
43#include <sys/ioctl.h>
44#include <sys/tty.h>
45#include <sys/user.h>
46#include <sys/exec.h>
47#include <sys/sysctl.h>
48#include <sys/core.h>
49#include <sys/kcore.h>
50#include <sys/extent.h>
51
52#include <sys/mount.h>
53#include <sys/syscallargs.h>
54
55#include <uvm/uvm.h>
56#include <uvm/uvm_page.h>
57#include <uvm/uvm_swap.h>
58
59#include <dev/cons.h>
60
61#include <machine/pdc.h>
62#include <machine/iomod.h>
63#include <machine/psl.h>
64#include <machine/reg.h>
65#include <machine/cpufunc.h>
66#include <machine/autoconf.h>
67#include <machine/kcore.h>
68
69#ifdef COMPAT_HPUX
70#include <compat/hpux/hpux.h>
71#include <compat/hpux/hpux_sig.h>
72#include <compat/hpux/hpux_util.h>
73#include <compat/hpux/hpux_syscallargs.h>
74#include <machine/hpux_machdep.h>
75#endif
76
77#ifdef DDB
78#include <machine/db_machdep.h>
79#include <ddb/db_access.h>
80#include <ddb/db_sym.h>
81#include <ddb/db_extern.h>
82#endif
83
84#include <hppa/dev/cpudevs.h>
85
86/*
87 * Patchable buffer cache parameters
88 */
89#ifndef BUFCACHEPERCENT
90#define BUFCACHEPERCENT 10
91#endif /* BUFCACHEPERCENT */
92
93#ifdef BUFPAGES
94int bufpages = BUFPAGES;
95#else
96int bufpages = 0;
97#endif
98int bufcachepercent = BUFCACHEPERCENT;
99
100/*
101 * Different kinds of flags used throughout the kernel.
102 */
103int cold = 1;			/* cleared once the machine is up and running */
104extern int msgbufmapped;	/* set when safe to use msgbuf */
105
106/*
107 * cache configuration; the numbers are the same for most machines,
108 * so it makes sense to define them depending on the cpu types
109 * configured into the kernel
110 */
111int icache_stride, icache_line_mask;
112int dcache_stride, dcache_line_mask;
113
114/*
115 * things to not kill
116 */
117volatile u_int8_t *machine_ledaddr;
118int machine_ledword, machine_leds;
119struct cpu_info cpu_info[HPPA_MAXCPUS];
120
121/*
122 * CPU params (should be the same for all cpus in the system)
123 */
124struct pdc_cache pdc_cache PDC_ALIGNMENT;
125struct pdc_btlb pdc_btlb PDC_ALIGNMENT;
126struct pdc_model pdc_model PDC_ALIGNMENT;
127
128	/* should be the same, within a small deviation, for all installed cpus */
129u_int	cpu_ticksnum, cpu_ticksdenom;
130
131	/* exported info */
132char	machine[] = MACHINE;
133char	cpu_model[128];
134enum hppa_cpu_type cpu_type;
135const char *cpu_typename;
136int	cpu_hvers;
137u_int	fpu_version;
138#ifdef COMPAT_HPUX
139int	cpu_model_hpux;	/* contains HPUX_SYSCONF_CPU* kind of value */
140#endif
141
142int	led_blink;
143
144/*
145 * exported methods for cpus
146 */
147int (*cpu_desidhash)(void);
148int (*cpu_hpt_init)(vaddr_t hpt, vsize_t hptsize);
149int (*cpu_ibtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
150	    vsize_t sz, u_int prot);
151int (*cpu_dbtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
152	    vsize_t sz, u_int prot);
153
154dev_t	bootdev;
155int	physmem, resvmem, resvphysmem, esym;
156paddr_t	avail_end;
157
158#ifdef MULTIPROCESSOR
159struct mutex mtx_atomic = MUTEX_INITIALIZER(IPL_NONE);
160#endif
161
162/*
163 * Things for MI glue to stick on.
164 */
165struct user *proc0paddr;
166long mem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)];
167struct extent *hppa_ex;
168struct pool hppa_fppl;
169struct fpreg proc0fpregs;
170struct consdev *cn_tab;
171
172struct vm_map *exec_map = NULL;
173struct vm_map *phys_map = NULL;
174/* Virtual page frame for /dev/mem (see mem.c) */
175vaddr_t vmmap;
176
177void delay_init(void);
178static __inline void fall(int, int, int, int, int);
179void dumpsys(void);
180void hpmc_dump(void);
181void cpuid(void);
182void blink_led_timeout(void *);
183
184/*
185 * safepri is a safe priority for sleep to set for a spin-wait
186 * during autoconfiguration or after a panic.
187 */
188int   safepri = 0;
189
190/*
191 * widely used hardware params
192 */
193struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
194struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
195struct pdc_coherence pdc_coherence PDC_ALIGNMENT;
196struct pdc_spidb pdc_spidbits PDC_ALIGNMENT;
198
199#ifdef DEBUG
200int sigdebug = 0;
201pid_t sigpid = 0;
202#define SDB_FOLLOW	0x01
203#endif
204
205struct uvm_constraint_range  dma_constraint = { 0x0, (paddr_t)-1 };
206struct uvm_constraint_range *uvm_md_constraints[] = { NULL };
207
208/*
209 * Whatever CPU types we support
210 */
211extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
212extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
213extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
214extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
215extern const u_int itlb_u[], itlbna_u[], dtlb_u[], dtlbna_u[], tlbd_u[];
216int iibtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
217    vsize_t sz, u_int prot);
218int idbtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
219    vsize_t sz, u_int prot);
220int ibtlb_t(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
221    vsize_t sz, u_int prot);
222int ibtlb_l(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
223    vsize_t sz, u_int prot);
224int ibtlb_u(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
225    vsize_t sz, u_int prot);
226int ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
227    vsize_t sz, u_int prot);
228int pbtlb_g(int i);
229int pbtlb_u(int i);
230int hpti_l(vaddr_t, vsize_t);
231int hpti_u(vaddr_t, vsize_t);
232int hpti_g(vaddr_t, vsize_t);
233int desidhash_x(void);
234int desidhash_s(void);
235int desidhash_t(void);
236int desidhash_l(void);
237int desidhash_u(void);
238const struct hppa_cpu_typed {
239	char name[8];
240	enum hppa_cpu_type type;
241	int  cpuid;
242	int  features;
243	int  patch;
244	int  (*desidhash)(void);
245	int  (*dbtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
246	     vsize_t sz, u_int prot);
247	int  (*ibtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
248	     vsize_t sz, u_int prot);
249	int  (*btlbprg)(int i);
250	int  (*hptinit)(vaddr_t hpt, vsize_t hptsize);
251} cpu_types[] = {
252#ifdef HP7000_CPU
253	{ "PCXS",  hpcxs,  0, 0, 3, desidhash_s, ibtlb_g, NULL, pbtlb_g},
254#endif
255#ifdef HP7100_CPU
256	{ "PCXT",  hpcxt, 0, HPPA_FTRS_BTLBU,
257	  2, desidhash_t, ibtlb_g, NULL, pbtlb_g},
258#endif
259#ifdef HP7200_CPU
260	{ "PCXT'", hpcxta,HPPA_CPU_PCXT2, HPPA_FTRS_BTLBU,
261	  2, desidhash_t, ibtlb_g, NULL, pbtlb_g},
262#endif
263#ifdef HP7100LC_CPU
264	{ "PCXL",  hpcxl, HPPA_CPU_PCXL, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
265	  0, desidhash_l, ibtlb_g, NULL, pbtlb_g, hpti_g},
266#endif
267#ifdef HP7300LC_CPU
268	{ "PCXL2", hpcxl2,HPPA_CPU_PCXL2, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
269	  0, desidhash_l, ibtlb_g, NULL, pbtlb_g, hpti_g},
270#endif
271#ifdef HP8000_CPU
272	{ "PCXU",  hpcxu, HPPA_CPU_PCXU, HPPA_FTRS_W32B,
273	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
274#endif
275#ifdef HP8200_CPU
276	{ "PCXU+", hpcxu2,HPPA_CPU_PCXUP, HPPA_FTRS_W32B,
277	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
278#endif
279#ifdef HP8500_CPU
280	{ "PCXW",  hpcxw, HPPA_CPU_PCXW, HPPA_FTRS_W32B,
281	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
282#endif
283#ifdef HP8700_CPU
284	{ "PCXW2",  hpcxw, HPPA_CPU_PCXW2, HPPA_FTRS_W32B,
285	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
286#endif
287	{ "", 0 }
288};
289
290int	hppa_cpuspeed(int *mhz);
291
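/*
 * PAGE0->mem_10msec is the number of interval timer (CR16) ticks in
 * 10ms; assuming the timer ticks at the processor frequency, dividing
 * by 10000 (ticks per microsecond) yields the clock speed in MHz.
 */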
292int
293hppa_cpuspeed(int *mhz)
294{
295	*mhz = PAGE0->mem_10msec / 10000;
296
297	return (0);
298}
299
300void
301hppa_init(start)
302	paddr_t start;
303{
304	extern u_long cpu_hzticks;
305	extern int kernel_text;
306	struct cpu_info *ci;
307	int error;
308
309	pdc_init();	/* init PDC iface, so we can call em easy */
310
311	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
312	delay_init();	/* calculate cpu clock ratio */
313
314	/* cache parameters */
315	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
316	    &pdc_cache)) < 0) {
317#ifdef DEBUG
318		printf("WARNING: PDC_CACHE error %d\n", error);
319#endif
320	}
321
322	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
323	dcache_stride = pdc_cache.dc_stride;
324	icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1;
325	icache_stride = pdc_cache.ic_stride;
326
327	/* cache coherence params (probably available for 8k only) */
328	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_SETCS,
329	    &pdc_coherence, 1, 1, 1, 1);
330#ifdef DEBUG
331	printf ("PDC_CACHE_SETCS: %d, %d, %d, %d (%d)\n",
332	    pdc_coherence.ia_cst, pdc_coherence.da_cst,
333	    pdc_coherence.ita_cst, pdc_coherence.dta_cst, error);
334#endif
335	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_GETSPIDB,
336	    &pdc_spidbits, 0, 0, 0, 0);
337	printf("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error);
338
339	/* setup hpmc handler */
340	{
341		extern u_int hpmc_v[];	/* from locore.s */
342		register u_int *p = hpmc_v;
343
344		if (pdc_call((iodcio_t)pdc, 0, PDC_INSTR, PDC_INSTR_DFLT, p))
345			*p = 0x08000240;
346
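		/*
		 * Point the vector at hpmc_dump and recompute word 5 so
		 * that the eight words of the HPMC vector sum to zero,
		 * satisfying the firmware's checksum convention.
		 */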
347		p[6] = (u_int)&hpmc_dump;
348		p[7] = 32;
349		p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);
350	}
351
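	/*
	 * Point the page zero ivec_toc and ivec_mempf vectors at the
	 * kernel's hppa_toc and hppa_pfr handlers, appending a checksum
	 * word computed over each region.
	 */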
352	{
353		extern u_int hppa_toc[], hppa_toc_end[];
354		register u_int cksum, *p;
355
356		for (cksum = 0, p = hppa_toc; p < hppa_toc_end; p++)
357			cksum += *p;
358
359		*p = cksum;
360		PAGE0->ivec_toc = (u_int)hppa_toc;
361		PAGE0->ivec_toclen = (hppa_toc_end - hppa_toc + 1) * 4;
362	}
363
364	{
365		extern u_int hppa_pfr[], hppa_pfr_end[];
366		register u_int cksum, *p;
367
368		for (cksum = 0, p = hppa_pfr; p < hppa_pfr_end; p++)
369			cksum += *p;
370
371		*p = cksum;
372		PAGE0->ivec_mempf = (u_int)hppa_pfr;
373		PAGE0->ivec_mempflen = (hppa_pfr_end - hppa_pfr + 1) * 4;
374	}
375
376	ci = curcpu();
377	ci->ci_cpl = IPL_NESTED;
378	ci->ci_psw = PSL_Q | PSL_P | PSL_C | PSL_D;
379
380	cpuid();
381	ptlball();
382	ficacheall();
383	fdcacheall();
384
385	avail_end = trunc_page(PAGE0->imm_max_mem);
386	/*
387	 * XXX For some reason, using any physical memory above the
388	 * 2GB marker causes memory corruption on PA-RISC 2.0
389	 * machines.  Cap physical memory at 2GB for now.
390	 */
391#if 0
392	if (avail_end > SYSCALLGATE)
393		avail_end = SYSCALLGATE;
394#else
395	if (avail_end > 0x80000000)
396		avail_end = 0x80000000;
397#endif
398	physmem = atop(avail_end);
399	resvmem = atop(((vaddr_t)&kernel_text));
400
401	/* we hope this won't fail */
402	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
403	    (caddr_t)mem_ex_storage, sizeof(mem_ex_storage),
404	    EX_NOCOALESCE|EX_NOWAIT);
405	if (extent_alloc_region(hppa_ex, 0, (vaddr_t)PAGE0->imm_max_mem,
406	    EX_NOWAIT))
407		panic("cannot reserve main memory");
408
409	/* sets resvphysmem */
410	pmap_bootstrap(round_page(start));
411
412	/* space has been reserved in pmap_bootstrap() */
413	initmsgbuf((caddr_t)(ptoa(physmem) - round_page(MSGBUFSIZE)),
414	    round_page(MSGBUFSIZE));
415
416	/* they say PDC_COPROC might turn fault light on */
417	pdc_call((iodcio_t)pdc, 0, PDC_CHASSIS, PDC_CHASSIS_DISP,
418	    PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0);
419
420	cpu_cpuspeed = &hppa_cpuspeed;
421#ifdef DDB
422	ddb_init();
423#endif
424	ficacheall();
425	fdcacheall();
426
427	proc0paddr->u_pcb.pcb_fpregs = &proc0fpregs;
428	pool_init(&hppa_fppl, sizeof(struct fpreg), 16, 0, 0, "hppafp", NULL);
429}
430
431void
432cpuid()
433{
434	/*
435	 * Ptrs to various tlb handlers, to be filled
436	 * based on cpu features.
437	 * from locore.S
438	 */
439	extern u_int trap_ep_T_TLB_DIRTY[];
440	extern u_int trap_ep_T_DTLBMISS[];
441	extern u_int trap_ep_T_DTLBMISSNA[];
442	extern u_int trap_ep_T_ITLBMISS[];
443	extern u_int trap_ep_T_ITLBMISSNA[];
444
445	extern u_int fpu_enable;
446	extern int cpu_fpuena;
447	struct pdc_cpuid pdc_cpuid PDC_ALIGNMENT;
448	const struct hppa_cpu_typed *p = NULL;
449	u_int cpu_features;
450	int error;
451
452	/* may the scientific guessing begin */
453	cpu_features = 0;
454	cpu_type = 0;
455
456	/* identify system type */
457	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_INFO,
458	    &pdc_model)) < 0) {
459#ifdef DEBUG
460		printf("WARNING: PDC_MODEL error %d\n", error);
461#endif
462		pdc_model.hvers = 0;
463	}
464
465	bzero(&pdc_cpuid, sizeof(pdc_cpuid));
466	if (pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_CPUID,
467	    &pdc_cpuid, 0, 0, 0, 0) >= 0) {
468
469		/* patch for old 8200 */
470		if (pdc_cpuid.version == HPPA_CPU_PCXU &&
471		    pdc_cpuid.revision > 0x0d)
472			pdc_cpuid.version = HPPA_CPU_PCXUP;
473
474		cpu_type = pdc_cpuid.version;
475	}
476
477	/* locate coprocessors and SFUs */
478	bzero(&pdc_coproc, sizeof(pdc_coproc));
479	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
480	    &pdc_coproc, 0, 0, 0, 0)) < 0) {
481		printf("WARNING: PDC_COPROC error %d\n", error);
482		cpu_fpuena = 0;
483	} else {
484		printf("pdc_coproc: 0x%x, 0x%x; model %x rev %x\n",
485		    pdc_coproc.ccr_enable, pdc_coproc.ccr_present,
486		    pdc_coproc.fpu_model, pdc_coproc.fpu_revision);
487		fpu_enable = pdc_coproc.ccr_enable & CCR_MASK;
488		cpu_fpuena = 1;
489
490		/* a kludge to detect PCXW */
491		if (pdc_coproc.fpu_model == HPPA_FPU_PCXW)
492			cpu_type = HPPA_CPU_PCXW;
493	}
494
495	/* BTLB params */
496	if (cpu_type < HPPA_CPU_PCXU &&
497	    (error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
498	     PDC_BTLB_DEFAULT, &pdc_btlb)) < 0) {
499#ifdef DEBUG
500		printf("WARNING: PDC_BTLB error %d\n", error);
501#endif
502	} else {
503#ifdef BTLBDEBUG
504		printf("btlb info: minsz=%d, maxsz=%d\n",
505		    pdc_btlb.min_size, pdc_btlb.max_size);
506		printf("btlb fixed: i=%d, d=%d, c=%d\n",
507		    pdc_btlb.finfo.num_i,
508		    pdc_btlb.finfo.num_d,
509		    pdc_btlb.finfo.num_c);
510		printf("btlb varbl: i=%d, d=%d, c=%d\n",
511		    pdc_btlb.vinfo.num_i,
512		    pdc_btlb.vinfo.num_d,
513		    pdc_btlb.vinfo.num_c);
514#endif /* BTLBDEBUG */
515		/* purge TLBs and caches */
516		if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
517		    PDC_BTLB_PURGE_ALL) < 0)
518			printf("WARNING: BTLB purge failed\n");
519
520		if (pdc_btlb.finfo.num_c)
521			cpu_features |= HPPA_FTRS_BTLBU;
522	}
523
524	if (!pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
525	    pdc_hwtlb.min_size && pdc_hwtlb.max_size) {
526		cpu_features |= HPPA_FTRS_HVT;
527		if (pmap_hptsize > pdc_hwtlb.max_size)
528			pmap_hptsize = pdc_hwtlb.max_size;
529		else if (pmap_hptsize && pmap_hptsize < pdc_hwtlb.min_size)
530			pmap_hptsize = pdc_hwtlb.min_size;
531	} else {
532#ifdef DEBUG
533		printf("WARNING: no HPT support, fine!\n");
534#endif
535		pmap_hptsize = 0;
536	}
537
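	/*
	 * Prefer an exact match on the PDC-reported cpuid; fall back to
	 * matching the detected feature set on machines where
	 * PDC_MODEL_CPUID is not implemented.
	 */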
538	if (cpu_type)
539		for (p = cpu_types; p->name[0] && p->cpuid != cpu_type; p++);
540	else
541		for (p = cpu_types;
542		    p->name[0] && p->features != cpu_features; p++);
543
544	if (!p->name[0]) {
545		printf("WARNING: UNKNOWN CPU TYPE; GOOD LUCK "
546		    "(type 0x%x, features 0x%x)\n", cpu_type, cpu_features);
547		p = cpu_types;
548	} else if ((p->type == hpcxl || p->type == hpcxl2) && !fpu_enable) {
549		/* we know PCXL and PCXL2 do not exist w/o FPU */
550		fpu_enable = 0xc0;
551		cpu_fpuena = 1;
552	}
553
554	/*
555	 * TODO: HPT on 7200 is not currently supported
556	 */
557	if (pmap_hptsize && p->type != hpcxl && p->type != hpcxl2)
558		pmap_hptsize = 0;
559
560	cpu_type = p->type;
561	cpu_typename = p->name;
562	cpu_ibtlb_ins = p->ibtlbins;
563	cpu_dbtlb_ins = p->dbtlbins;
564	cpu_hpt_init = p->hptinit;
565	cpu_desidhash = p->desidhash;
566
567	/* patch tlb handler branches */
568	if (p->patch) {
569		trap_ep_T_TLB_DIRTY [0] = trap_ep_T_TLB_DIRTY [p->patch];
570		trap_ep_T_DTLBMISS  [0] = trap_ep_T_DTLBMISS  [p->patch];
571		trap_ep_T_DTLBMISSNA[0] = trap_ep_T_DTLBMISSNA[p->patch];
572		trap_ep_T_ITLBMISS  [0] = trap_ep_T_ITLBMISS  [p->patch];
573		trap_ep_T_ITLBMISSNA[0] = trap_ep_T_ITLBMISSNA[p->patch];
574	}
575
576	/* force strong ordering for now */
577	if (p->features & HPPA_FTRS_W32B) {
578		curcpu()->ci_psw |= PSL_O;
579	}
580
581	{
582		const char *p, *q;
583		char buf[32];
584		int lev;
585
586		lev = 0xa + (*cpu_desidhash)();
587		cpu_hvers = pdc_model.hvers >> 4;
588		if (!cpu_hvers) {
589			p = "(UNKNOWN)";
590			q = lev == 0xa? "1.0" : "1.1";
591		} else {
592			p = hppa_mod_info(HPPA_TYPE_BOARD, cpu_hvers);
593			if (!p) {
594				snprintf(buf, sizeof buf, "(UNKNOWN 0x%x)",
595				    cpu_hvers);
596				p = buf;
597			}
598
599			switch (pdc_model.arch_rev) {
600			default:
601			case 0:
602				q = "1.0";
603#ifdef COMPAT_HPUX
604				cpu_model_hpux = HPUX_SYSCONF_CPUPA10;
605#endif
606				break;
607			case 4:
608				q = "1.1";
609#ifdef COMPAT_HPUX
610				cpu_model_hpux = HPUX_SYSCONF_CPUPA11;
611#endif
612				/* this one is just a 100MHz pcxl */
613				if (lev == 0x10)
614					lev = 0xc;
615				/* this one is a pcxl2 */
616				if (lev == 0x16)
617					lev = 0xe;
618				break;
619			case 8:
620				q = "2.0";
621#ifdef COMPAT_HPUX
622				cpu_model_hpux = HPUX_SYSCONF_CPUPA20;
623#endif
624				break;
625			}
626		}
627
628		snprintf(cpu_model, sizeof cpu_model,
629		    "HP 9000/%s PA-RISC %s%x", p, q, lev);
630	}
631#ifdef DEBUG
632	printf("cpu: %s\n", cpu_model);
633#endif
634}
635
636void
637cpu_startup(void)
638{
639	vaddr_t minaddr, maxaddr;
640
641	/*
642	 * i won't understand a friend of mine,
643	 * who sat in a room full of artificial ice,
644	 * fogging the air w/ humid cries --
645	 *	WELCOME TO SUMMER!
646	 */
647	printf(version);
648
649	printf("%s\n", cpu_model);
650	printf("real mem = %u (%uMB)\n", ptoa(physmem),
651	    ptoa(physmem) / 1024 / 1024);
652	printf("rsvd mem = %u (%uKB)\n", ptoa(resvmem), ptoa(resvmem) / 1024);
653
654	/*
655	 * Allocate a submap for exec arguments.  This map effectively
656	 * limits the number of processes exec'ing at any time.
657	 */
658	minaddr = vm_map_min(kernel_map);
659	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
660	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
661
662	/*
663	 * Allocate a submap for physio
664	 */
665	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
666	    VM_PHYS_SIZE, 0, FALSE, NULL);
667
668	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
669	    ptoa(uvmexp.free) / 1024 / 1024);
670
671	/*
672	 * Set up buffers, so they can be used to read disk labels.
673	 */
674	bufinit();
675	vmmap = uvm_km_valloc_wait(kernel_map, NBPG);
676
677	/*
678	 * Configure the system.
679	 */
680	if (boothowto & RB_CONFIG) {
681#ifdef BOOT_CONFIG
682		user_config();
683#else
684		printf("kernel does not support -c; continuing..\n");
685#endif
686	}
687}
688
689/*
690 * compute the cpu clock ratio such that:
691 *	cpu_ticksnum / cpu_ticksdenom = ticks per microsecond + delta
692 *	with delta -> 0
693 */
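/*
 * For example, a 60MHz machine reports PAGE0->mem_10msec == 600000, so
 * the loop below settles on cpu_ticksnum/cpu_ticksdenom == 60/1, i.e.
 * 60 itmr ticks per microsecond of requested delay.  (Illustrative
 * numbers; the search simply minimizes the rounding error for whatever
 * ratio the firmware reports.)
 */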
694void
695delay_init(void)
696{
697	register u_int num, denom, delta, mdelta;
698
699	mdelta = UINT_MAX;
700	for (denom = 1; denom < 1000; denom++) {
701		num = (PAGE0->mem_10msec * denom) / 10000;
702		delta = num * 10000 / denom - PAGE0->mem_10msec;
703		if (!delta) {
704			cpu_ticksdenom = denom;
705			cpu_ticksnum = num;
706			break;
707		} else if (delta < mdelta) {
708			cpu_ticksdenom = denom;
709			cpu_ticksnum = num;
710			mdelta = delta;
711		}
712	}
713}
714
715void
716delay(us)
717	u_int us;
718{
719	register u_int start, end, n;
720
721	mfctl(CR_ITMR, start);
722	while (us) {
723		n = min(1000, us);
724		end = start + n * cpu_ticksnum / cpu_ticksdenom;
725
726		/* N.B. Interval Timer may wrap around */
727		if (end < start)
728			do
729				mfctl(CR_ITMR, start);
730			while (start > end);
731
732		do
733			mfctl(CR_ITMR, start);
734		while (start < end);
735
736		us -= n;
737	}
738}
739
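/*
 * Flush an entire cache: walk c_count sets, c_stride bytes apart,
 * issuing c_loop flush-entry operations per set: fdce for the data
 * cache (data != 0), fice for the instruction cache.
 */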
740static __inline void
741fall(c_base, c_count, c_loop, c_stride, data)
742	int c_base, c_count, c_loop, c_stride, data;
743{
744	register int loop;
745
746	for (; c_count--; c_base += c_stride)
747		for (loop = c_loop; loop--; )
748			if (data)
749				fdce(0, c_base);
750			else
751				fice(0, c_base);
752}
753
754void
755ficacheall(void)
756{
757	/*
758	 * Flush the instruction, then data cache.
759	 */
760	fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop,
761	    pdc_cache.ic_stride, 0);
762	sync_caches();
763}
764
765void
766fdcacheall(void)
767{
768	fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop,
769	    pdc_cache.dc_stride, 1);
770	sync_caches();
771}
772
773void
774ptlball(void)
775{
776	register pa_space_t sp;
777	register int i, j, k;
778
779	/* instruction TLB */
780	sp = pdc_cache.it_sp_base;
781	for (i = 0; i < pdc_cache.it_sp_count; i++) {
782		register vaddr_t off = pdc_cache.it_off_base;
783		for (j = 0; j < pdc_cache.it_off_count; j++) {
784			for (k = 0; k < pdc_cache.it_loop; k++)
785				pitlbe(sp, off);
786			off += pdc_cache.it_off_stride;
787		}
788		sp += pdc_cache.it_sp_stride;
789	}
790
791	/* data TLB */
792	sp = pdc_cache.dt_sp_base;
793	for (i = 0; i < pdc_cache.dt_sp_count; i++) {
794		register vaddr_t off = pdc_cache.dt_off_base;
795		for (j = 0; j < pdc_cache.dt_off_count; j++) {
796			for (k = 0; k < pdc_cache.dt_loop; k++)
797				pdtlbe(sp, off);
798			off += pdc_cache.dt_off_stride;
799		}
800		sp += pdc_cache.dt_sp_stride;
801	}
802}
803
804int
805hpti_g(hpt, hptsize)
806	vaddr_t hpt;
807	vsize_t hptsize;
808{
809	return pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_CONFIG,
810	    &pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE);
811}
812
813int
814pbtlb_g(i)
815	int i;
816{
817	return -1;
818}
819
820int
821ibtlb_g(i, sp, va, pa, sz, prot)
822	int i;
823	pa_space_t sp;
824	vaddr_t va;
825	paddr_t pa;
826	vsize_t sz;
827	u_int prot;
828{
829	int error;
830
831	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_INSERT,
832	    sp, va, pa, sz, prot, i)) < 0) {
833#ifdef BTLBDEBUG
834		printf("WARNING: BTLB insert failed (%d)\n", error);
835#endif
836	}
837	return error;
838}
839
840int
841btlb_insert(space, va, pa, lenp, prot)
842	pa_space_t space;
843	vaddr_t va;
844	paddr_t pa;
845	vsize_t *lenp;
846	u_int prot;
847{
848	static u_int32_t mask;
849	register vsize_t len;
850	register int error, i, btlb_max;
851
852	if (!pdc_btlb.min_size && !pdc_btlb.max_size)
853		return -(ENXIO);
854
855	/*
856	 * On PCXS processors with split BTLB, we should theoretically
857	 * insert in the IBTLB (if executable mapping requested), and
858	 * into the DBTLB. The PDC documentation is very clear that
859	 * slot numbers are, in order, IBTLB, then DBTLB, then combined
860	 * BTLB.
861	 *
862	 * However it also states that ``successful completion may not mean
863	 * that the entire address range specified in the call has been
864	 * mapped in the block TLB. For both fixed range slots and variable
865	 * range slots, complete coverage of the address range specified
866	 * is not guaranteed. Only a portion of the address range specified
867	 * may get mapped as a result''.
868	 *
869	 * On an HP 9000/720 with PDC ROM v1.2, it turns out that IBTLB
870	 * entries are inserted as expected, but no DBTLB gets inserted
871	 * at all, despite PDC returning success.
872	 *
873	 * So play it dumb, and do not attempt to insert DBTLB entries at
874	 * all on split BTLB systems. Callers are supposed to be able to
875	 * cope with this.
876	 */
877
878	if (pdc_btlb.finfo.num_c == 0) {
879		if ((prot & TLB_EXECUTE) == 0)
880			return -(EINVAL);
881
882		btlb_max = pdc_btlb.finfo.num_i;
883	} else {
884		btlb_max = pdc_btlb.finfo.num_c;
885	}
886
887	/* round the size up to a power-of-two multiple of the minimum BTLB size */
888	for (len = pdc_btlb.min_size << PGSHIFT; len < *lenp; len <<= 1);
889	len >>= PGSHIFT;
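	/* `mask' tracks the BTLB slots already in use; take the lowest free one */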
890	i = ffs(~mask) - 1;
891	if (len > pdc_btlb.max_size || i < 0 || i >= btlb_max) {
892#ifdef BTLBDEBUG
893		printf("btlb_insert: too big (%u < %u < %u)\n",
894		    pdc_btlb.min_size, len, pdc_btlb.max_size);
895#endif
896		return -(ENOMEM);
897	}
898
899	mask |= 1 << i;
900	pa >>= PGSHIFT;
901	va >>= PGSHIFT;
902	/* check address alignment */
903	if (pa & (len - 1)) {
904#ifdef BTLBDEBUG
905		printf("WARNING: BTLB address misaligned pa=0x%x, len=0x%x\n",
906		    pa, len);
907#endif
908		return -(ERANGE);
909	}
910
911	/* ensure IO space is uncached */
912	if ((pa & (HPPA_IOBEGIN >> PGSHIFT)) == (HPPA_IOBEGIN >> PGSHIFT))
913		prot |= TLB_UNCACHABLE;
914
915#ifdef BTLBDEBUG
916	printf("btlb_insert(%d): %x:%x=%x[%x,%x]\n",
917	    i, space, va, pa, len, prot);
918#endif
919	if ((error = (*cpu_dbtlb_ins)(i, space, va, pa, len, prot)) < 0)
920		return -(EINVAL);
921	*lenp = len << PGSHIFT;
922
923	return i;
924}
925
926int waittime = -1;
927
928void
929boot(howto)
930	int howto;
931{
932	/* If system is cold, just halt. */
933	if (cold) {
934		/* (Unless the user explicitly asked for reboot.) */
935		if ((howto & RB_USERREQ) == 0)
936			howto |= RB_HALT;
937	} else {
938
939		boothowto = howto | (boothowto & RB_HALT);
940
941		if (!(howto & RB_NOSYNC)) {
942			waittime = 0;
943			vfs_shutdown();
944			/*
945			 * If we've been adjusting the clock, the todr
946			 * will be out of synch; adjust it now unless
947			 * the system was sitting in ddb.
948			 */
949			if ((howto & RB_TIMEBAD) == 0)
950				resettodr();
951			else
952				printf("WARNING: not updating battery clock\n");
953		}
954
955		/* XXX probably save howto into stable storage */
956
957		uvm_shutdown();
958		splhigh();
959
960		if (howto & RB_DUMP)
961			dumpsys();
962
963		doshutdownhooks();
964	}
965
966	/* in case we came in on a powerfail interrupt */
967	if (cold_hook)
968		(*cold_hook)(HPPA_COLD_COLD);
969
970	if (howto & RB_HALT) {
971		if (howto & RB_POWERDOWN && cold_hook) {
972			printf("Powering off...");
973			DELAY(2000000);
974			(*cold_hook)(HPPA_COLD_OFF);
975			DELAY(1000000);
976		}
977
978		printf("System halted!\n");
979		DELAY(2000000);
980		__asm __volatile("stwas %0, 0(%1)"
981		    :: "r" (CMD_STOP), "r" (HPPA_LBCAST + iomod_command));
982	} else {
983		printf("rebooting...");
984		DELAY(2000000);
985
986		/* ask firmware to reset */
987		pdc_call((iodcio_t)pdc, 0, PDC_BROADCAST_RESET, PDC_DO_RESET);
988
989		/* forcibly reset the module if that fails */
990		__asm __volatile(".export hppa_reset, entry\n\t"
991		    ".label hppa_reset");
992		__asm __volatile("stwas %0, 0(%1)"
993		    :: "r" (CMD_RESET), "r" (HPPA_LBCAST + iomod_command));
994	}
995
996	for(;;); /* loop while bus reset is coming up */
997	/* NOTREACHED */
998}
999
1000u_long	dumpmag = 0x8fca0101;	/* magic number */
1001int	dumpsize = 0;		/* pages */
1002long	dumplo = 0;		/* blocks */
1003
1004/*
1005 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1006 */
1007int
1008cpu_dumpsize(void)
1009{
1010	int size;
1011
1012	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
1013	if (roundup(size, dbtob(1)) != dbtob(1))
1014		return -1;
1015
1016	return 1;
1017}
1018
1019/*
1020 * Called from HPMC handler in locore
1021 */
1022void
1023hpmc_dump(void)
1024{
1025	printf("HPMC\n");
1026
1027	cold = 0;
1028	boot(RB_NOSYNC);
1029}
1030
1031int
1032cpu_dump(void)
1033{
1034	long buf[dbtob(1) / sizeof (long)];
1035	kcore_seg_t	*segp;
1036	cpu_kcore_hdr_t	*cpuhdrp;
1037
1038	segp = (kcore_seg_t *)buf;
1039	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];
1040
1041	/*
1042	 * Generate a segment header.
1043	 */
1044	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1045	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
1046
1047	/*
1048	 * Add the machine-dependent header info
1049	 */
1050	/* nothing for now */
1051
1052	return (bdevsw[major(dumpdev)].d_dump)
1053	    (dumpdev, dumplo, (caddr_t)buf, dbtob(1));
1054}
1055
1056/*
1057 * Dump the kernel's image to the swap partition.
1058 */
1059#define	BYTES_PER_DUMP	NBPG
1060
1061void
1062dumpsys(void)
1063{
1064	int psize, bytes, i, n;
1065	caddr_t maddr;
1066	daddr64_t blkno;
1067	int (*dump)(dev_t, daddr64_t, caddr_t, size_t);
1068	int error;
1069
1070	/* Save registers
1071	savectx(&dumppcb); */
1072
1073	if (dumpsize == 0)
1074		dumpconf();
1075	if (dumplo <= 0) {
1076		printf("\ndump to dev %x not possible\n", dumpdev);
1077		return;
1078	}
1079	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);
1080
1081#ifdef UVM_SWAP_ENCRYPT
1082	uvm_swap_finicrypt_all();
1083#endif
1084
1085	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1086	printf("dump ");
1087	if (psize == -1) {
1088		printf("area unavailable\n");
1089		return;
1090	}
1091
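	/*
	 * cpu_dump() writes the one-block kcore header at dumplo; the
	 * physical memory image follows it on the dump device.
	 */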
1092	if (!(error = cpu_dump())) {
1093
1094		bytes = ptoa(physmem);
1095		maddr = NULL;
1096		blkno = dumplo + cpu_dumpsize();
1097		dump = bdevsw[major(dumpdev)].d_dump;
1098		/* TODO block map the whole physical memory */
1099		for (i = 0; i < bytes; i += n) {
1100
1101			/* Print out how many MBs we are to go. */
1102			n = bytes - i;
1103			if (n && (n % (1024*1024)) == 0)
1104				printf("%d ", n / (1024 * 1024));
1105
1106			/* Limit size for next transfer. */
1107
1108			if (n > BYTES_PER_DUMP)
1109				n = BYTES_PER_DUMP;
1110
1111			if ((error = (*dump)(dumpdev, blkno, maddr, n)))
1112				break;
1113			maddr += n;
1114			blkno += btodb(n);
1115		}
1116	}
1117
1118	switch (error) {
1119	case ENXIO:	printf("device bad\n");			break;
1120	case EFAULT:	printf("device not ready\n");		break;
1121	case EINVAL:	printf("area improper\n");		break;
1122	case EIO:	printf("i/o error\n");			break;
1123	case EINTR:	printf("aborted from console\n");	break;
1124	case 0:		printf("succeeded\n");			break;
1125	default:	printf("error %d\n", error);		break;
1126	}
1127}
1128
1129/* bcopy(), error on fault */
1130int
1131kcopy(from, to, size)
1132	const void *from;
1133	void *to;
1134	size_t size;
1135{
1136	return spcopy(HPPA_SID_KERNEL, from, HPPA_SID_KERNEL, to, size);
1137}
1138
1139int
1140copystr(src, dst, size, lenp)
1141	const void *src;
1142	void *dst;
1143	size_t size;
1144	size_t *lenp;
1145{
1146	return spstrcpy(HPPA_SID_KERNEL, src, HPPA_SID_KERNEL, dst, size, lenp);
1147}
1148
1149int
1150copyinstr(src, dst, size, lenp)
1151	const void *src;
1152	void *dst;
1153	size_t size;
1154	size_t *lenp;
1155{
1156	return spstrcpy(curproc->p_addr->u_pcb.pcb_space, src,
1157	    HPPA_SID_KERNEL, dst, size, lenp);
1158}
1159
1160
1161int
1162copyoutstr(src, dst, size, lenp)
1163	const void *src;
1164	void *dst;
1165	size_t size;
1166	size_t *lenp;
1167{
1168	return spstrcpy(HPPA_SID_KERNEL, src,
1169	    curproc->p_addr->u_pcb.pcb_space, dst, size, lenp);
1170}
1171
1172
1173int
1174copyin(src, dst, size)
1175	const void *src;
1176	void *dst;
1177	size_t size;
1178{
1179	return spcopy(curproc->p_addr->u_pcb.pcb_space, src,
1180	    HPPA_SID_KERNEL, dst, size);
1181}
1182
1183int
1184copyout(src, dst, size)
1185	const void *src;
1186	void *dst;
1187	size_t size;
1188{
1189	return spcopy(HPPA_SID_KERNEL, src,
1190	    curproc->p_addr->u_pcb.pcb_space, dst, size);
1191}
1192
1193/*
1194 * Set registers on exec.
1195 */
1196void
1197setregs(p, pack, stack, retval)
1198	struct proc *p;
1199	struct exec_package *pack;
1200	u_long stack;
1201	register_t *retval;
1202{
1203	extern paddr_t fpu_curpcb;	/* from locore.S */
1204	struct trapframe *tf = p->p_md.md_regs;
1205	struct pcb *pcb = &p->p_addr->u_pcb;
1206	register_t zero;
1207
1208	tf->tf_flags = TFF_SYS|TFF_LAST;
1209	tf->tf_iioq_tail = 4 +
1210	    (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER);
1211	tf->tf_rp = 0;
1212	tf->tf_arg0 = (u_long)PS_STRINGS;
1213	tf->tf_arg1 = tf->tf_arg2 = 0; /* XXX dynload stuff */
1214
1215	/* setup terminal stack frame */
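	/*
	 * The stack is rounded up to a 32-byte boundary; the frame
	 * pointer slot and the return pointer slot are zeroed so that a
	 * stack unwind of the fresh image terminates here.
	 */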
1216	stack = (stack + 0x1f) & ~0x1f;
1217	tf->tf_r3 = stack;
1218	tf->tf_sp = stack += HPPA_FRAME_SIZE;
1219	zero = 0;
1220	copyout(&zero, (caddr_t)(stack - HPPA_FRAME_SIZE), sizeof(register_t));
1221	copyout(&zero, (caddr_t)(stack + HPPA_FRAME_CRP), sizeof(register_t));
1222
1223	/* reset any of the pending FPU exceptions */
1224	if (tf->tf_cr30 == fpu_curpcb) {
1225		fpu_exit();
1226		fpu_curpcb = 0;
1227	}
1228	pcb->pcb_fpregs->fpr_regs[0] = ((u_int64_t)HPPA_FPU_INIT) << 32;
1229	pcb->pcb_fpregs->fpr_regs[1] = 0;
1230	pcb->pcb_fpregs->fpr_regs[2] = 0;
1231	pcb->pcb_fpregs->fpr_regs[3] = 0;
1232
1233	p->p_md.md_bpva = 0;
1234
1235	retval[1] = 0;
1236}
1237
1238/*
1239 * Send an interrupt to process.
1240 */
1241void
1242sendsig(catcher, sig, mask, code, type, val)
1243	sig_t catcher;
1244	int sig, mask;
1245	u_long code;
1246	int type;
1247	union sigval val;
1248{
1249	extern paddr_t fpu_curpcb;	/* from locore.S */
1250	extern u_int fpu_enable;
1251	struct proc *p = curproc;
1252	struct trapframe *tf = p->p_md.md_regs;
1253	struct pcb *pcb = &p->p_addr->u_pcb;
1254	struct sigacts *psp = p->p_sigacts;
1255	struct sigcontext ksc;
1256	siginfo_t ksi;
1257	register_t scp, sip;
1258	int sss;
1259
1260#ifdef DEBUG
1261	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1262		printf("sendsig: %s[%d] sig %d catcher %p\n",
1263		    p->p_comm, p->p_pid, sig, catcher);
1264#endif
1265
1266	/* flush the FPU ctx first */
1267	if (tf->tf_cr30 == fpu_curpcb) {
1268		mtctl(fpu_enable, CR_CCR);
1269		fpu_save(fpu_curpcb);
1270		/* fpu_curpcb = 0; only needed if fpregs are preset */
1271		mtctl(0, CR_CCR);
1272	}
1273
1274	ksc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
1275
1276	/*
1277	 * Allocate space for the signal handler context.
1278	 */
1279	if ((psp->ps_flags & SAS_ALTSTACK) && !ksc.sc_onstack &&
1280	    (psp->ps_sigonstack & sigmask(sig))) {
1281		scp = (register_t)psp->ps_sigstk.ss_sp;
1282		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
1283	} else
1284		scp = (tf->tf_sp + 63) & ~63;
1285
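	/*
	 * The context, siginfo and frame sizes are all rounded to 64-byte
	 * boundaries below, preserving the stack alignment the PA-RISC
	 * calling convention requires.
	 */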
1286	sss = (sizeof(ksc) + 63) & ~63;
1287	sip = 0;
1288	if (psp->ps_siginfo & sigmask(sig)) {
1289		sip = scp + sizeof(ksc);
1290		sss += (sizeof(ksi) + 63) & ~63;
1291	}
1292
1293#ifdef DEBUG
1294	if ((tf->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)
1295		printf("sendsig: interrupted syscall at 0x%x:0x%x flags %b\n",
1296		    tf->tf_iioq_head, tf->tf_iioq_tail, tf->tf_ipsw, PSL_BITS);
1297#endif
1298
1299	ksc.sc_mask = mask;
1300	ksc.sc_fp = scp + sss;
1301	ksc.sc_ps = tf->tf_ipsw;
1302	ksc.sc_pcoqh = tf->tf_iioq_head;
1303	ksc.sc_pcoqt = tf->tf_iioq_tail;
1304	ksc.sc_regs[0] = tf->tf_t1;
1305	ksc.sc_regs[1] = tf->tf_t2;
1306	ksc.sc_regs[2] = tf->tf_sp;
1307	ksc.sc_regs[3] = tf->tf_t3;
1308	ksc.sc_regs[4] = tf->tf_sar;
1309	ksc.sc_regs[5] = tf->tf_r1;
1310	ksc.sc_regs[6] = tf->tf_rp;
1311	ksc.sc_regs[7] = tf->tf_r3;
1312	ksc.sc_regs[8] = tf->tf_r4;
1313	ksc.sc_regs[9] = tf->tf_r5;
1314	ksc.sc_regs[10] = tf->tf_r6;
1315	ksc.sc_regs[11] = tf->tf_r7;
1316	ksc.sc_regs[12] = tf->tf_r8;
1317	ksc.sc_regs[13] = tf->tf_r9;
1318	ksc.sc_regs[14] = tf->tf_r10;
1319	ksc.sc_regs[15] = tf->tf_r11;
1320	ksc.sc_regs[16] = tf->tf_r12;
1321	ksc.sc_regs[17] = tf->tf_r13;
1322	ksc.sc_regs[18] = tf->tf_r14;
1323	ksc.sc_regs[19] = tf->tf_r15;
1324	ksc.sc_regs[20] = tf->tf_r16;
1325	ksc.sc_regs[21] = tf->tf_r17;
1326	ksc.sc_regs[22] = tf->tf_r18;
1327	ksc.sc_regs[23] = tf->tf_t4;
1328	ksc.sc_regs[24] = tf->tf_arg3;
1329	ksc.sc_regs[25] = tf->tf_arg2;
1330	ksc.sc_regs[26] = tf->tf_arg1;
1331	ksc.sc_regs[27] = tf->tf_arg0;
1332	ksc.sc_regs[28] = tf->tf_dp;
1333	ksc.sc_regs[29] = tf->tf_ret0;
1334	ksc.sc_regs[30] = tf->tf_ret1;
1335	ksc.sc_regs[31] = tf->tf_r31;
1336	bcopy(p->p_addr->u_pcb.pcb_fpregs, ksc.sc_fpregs,
1337	    sizeof(ksc.sc_fpregs));
1338
1339	sss += HPPA_FRAME_SIZE;
1340	tf->tf_arg0 = sig;
1341	tf->tf_arg1 = sip;
1342	tf->tf_arg2 = tf->tf_r4 = scp;
1343	tf->tf_arg3 = (register_t)catcher;
1344	tf->tf_sp = scp + sss;
1345	tf->tf_ipsw &= ~(PSL_N|PSL_B|PSL_T);
1346	tf->tf_iioq_head = HPPA_PC_PRIV_USER | p->p_sigcode;
1347	tf->tf_iioq_tail = tf->tf_iioq_head + 4;
1348	tf->tf_iisq_tail = tf->tf_iisq_head = pcb->pcb_space;
1349	/* disable tracing in the trapframe */
1350
1351#ifdef DEBUG
1352	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1353		printf("sendsig(%d): sig %d scp %p fp %p sp 0x%x\n",
1354		    p->p_pid, sig, scp, ksc.sc_fp, (register_t)scp + sss);
1355#endif
1356
1357	if (copyout(&ksc, (void *)scp, sizeof(ksc)))
1358		sigexit(p, SIGILL);
1359
1360	if (sip) {
1361		initsiginfo(&ksi, sig, code, type, val);
1362		if (copyout(&ksi, (void *)sip, sizeof(ksi)))
1363			sigexit(p, SIGILL);
1364	}
1365
1366	if (copyout(&tf->tf_r3, (caddr_t)(tf->tf_sp - HPPA_FRAME_SIZE),
1367	    sizeof(register_t)))
1368		sigexit(p, SIGILL);
1369	tf->tf_r3 = tf->tf_sp - HPPA_FRAME_SIZE;
1370
1371#ifdef DEBUG
1372	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1373		printf("sendsig(%d): pc 0x%x catcher 0x%x\n", p->p_pid,
1374		    tf->tf_iioq_head, tf->tf_arg3);
1375#endif
1376}
1377
1378int
1379sys_sigreturn(p, v, retval)
1380	struct proc *p;
1381	void *v;
1382	register_t *retval;
1383{
1384	extern paddr_t fpu_curpcb;	/* from locore.S */
1385	struct sys_sigreturn_args /* {
1386		syscallarg(struct sigcontext *) sigcntxp;
1387	} */ *uap = v;
1388	struct sigcontext *scp, ksc;
1389	struct trapframe *tf = p->p_md.md_regs;
1390	int error;
1391
1392	scp = SCARG(uap, sigcntxp);
1393#ifdef DEBUG
1394	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1395		printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
1396#endif
1397
1398	/* flush the FPU ctx first */
1399	if (tf->tf_cr30 == fpu_curpcb) {
1400		fpu_exit();
1401		fpu_curpcb = 0;
1402	}
1403
1404	if ((error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc)))
1405		return (error);
1406
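/* PSW bits that must be set / must be zero in a user-supplied context */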
1407#define PSL_MBS (PSL_C|PSL_Q|PSL_P|PSL_D|PSL_I)
1408#define PSL_MBZ (PSL_Y|PSL_Z|PSL_S|PSL_X|PSL_M|PSL_R)
1409	if ((ksc.sc_ps & (PSL_MBS|PSL_MBZ)) != PSL_MBS)
1410		return (EINVAL);
1411
1412	if (ksc.sc_onstack)
1413		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
1414	else
1415		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
1416	p->p_sigmask = ksc.sc_mask &~ sigcantmask;
1417
1418	tf->tf_t1 = ksc.sc_regs[0];		/* r22 */
1419	tf->tf_t2 = ksc.sc_regs[1];		/* r21 */
1420	tf->tf_sp = ksc.sc_regs[2];
1421	tf->tf_t3 = ksc.sc_regs[3];		/* r20 */
1422	tf->tf_sar = ksc.sc_regs[4];
1423	tf->tf_r1 = ksc.sc_regs[5];
1424	tf->tf_rp = ksc.sc_regs[6];
1425	tf->tf_r3 = ksc.sc_regs[7];
1426	tf->tf_r4 = ksc.sc_regs[8];
1427	tf->tf_r5 = ksc.sc_regs[9];
1428	tf->tf_r6 = ksc.sc_regs[10];
1429	tf->tf_r7 = ksc.sc_regs[11];
1430	tf->tf_r8 = ksc.sc_regs[12];
1431	tf->tf_r9 = ksc.sc_regs[13];
1432	tf->tf_r10 = ksc.sc_regs[14];
1433	tf->tf_r11 = ksc.sc_regs[15];
1434	tf->tf_r12 = ksc.sc_regs[16];
1435	tf->tf_r13 = ksc.sc_regs[17];
1436	tf->tf_r14 = ksc.sc_regs[18];
1437	tf->tf_r15 = ksc.sc_regs[19];
1438	tf->tf_r16 = ksc.sc_regs[20];
1439	tf->tf_r17 = ksc.sc_regs[21];
1440	tf->tf_r18 = ksc.sc_regs[22];
1441	tf->tf_t4 = ksc.sc_regs[23];		/* r19 */
1442	tf->tf_arg3 = ksc.sc_regs[24];		/* r23 */
1443	tf->tf_arg2 = ksc.sc_regs[25];		/* r24 */
1444	tf->tf_arg1 = ksc.sc_regs[26];		/* r25 */
1445	tf->tf_arg0 = ksc.sc_regs[27];		/* r26 */
1446	tf->tf_dp = ksc.sc_regs[28];
1447	tf->tf_ret0 = ksc.sc_regs[29];
1448	tf->tf_ret1 = ksc.sc_regs[30];
1449	tf->tf_r31 = ksc.sc_regs[31];
1450	bcopy(ksc.sc_fpregs, p->p_addr->u_pcb.pcb_fpregs,
1451	    sizeof(ksc.sc_fpregs));
1452
1453	tf->tf_iioq_head = ksc.sc_pcoqh | HPPA_PC_PRIV_USER;
1454	tf->tf_iioq_tail = ksc.sc_pcoqt | HPPA_PC_PRIV_USER;
1455	if ((tf->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)
1456		tf->tf_iisq_head = HPPA_SID_KERNEL;
1457	else
1458		tf->tf_iisq_head = p->p_addr->u_pcb.pcb_space;
1459	if ((tf->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
1460		tf->tf_iisq_tail = HPPA_SID_KERNEL;
1461	else
1462		tf->tf_iisq_tail = p->p_addr->u_pcb.pcb_space;
1463	tf->tf_ipsw = ksc.sc_ps | (curcpu()->ci_psw & PSL_O);
1464
1465#ifdef DEBUG
1466	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1467		printf("sigreturn(%d): returns\n", p->p_pid);
1468#endif
1469	return (EJUSTRETURN);
1470}
1471
1472#ifdef COMPAT_HPUX
1473void
1474hpux_sendsig(sig_t catcher, int sig, int mask, u_long code, int type,
1475    union sigval val)
1476{
1477	extern paddr_t fpu_curpcb;	/* from locore.S */
1478	extern u_int fpu_enable;
1479	struct proc *p = curproc;
1480	struct pcb *pcb = &p->p_addr->u_pcb;
1481	struct trapframe *tf = p->p_md.md_regs;
1482	struct sigacts *psp = p->p_sigacts;
1483	struct hpux_sigcontext hsc;
1484	int sss;
1485	register_t scp;
1486
1487#ifdef DEBUG
1488	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1489		printf("hpux_sendsig: %s[%d] sig %d catcher %p\n",
1490		    p->p_comm, p->p_pid, sig, catcher);
1491#endif
1492	/* flush the FPU ctx first */
1493	if (tf->tf_cr30 == fpu_curpcb) {
1494		mtctl(fpu_enable, CR_CCR);
1495		fpu_save(fpu_curpcb);
1496		fpu_curpcb = 0;
1497		mtctl(0, CR_CCR);
1498	}
1499
1500	bzero(&hsc, sizeof hsc);
1501	hsc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
1502	hsc.sc_omask = mask;
1503	/* sc_scact ??? */
1504
1505	hsc.sc_ret0 = tf->tf_ret0;
1506	hsc.sc_ret1 = tf->tf_ret1;
1507
1508	hsc.sc_frame[0] = hsc.sc_args[0] = sig;
1509	hsc.sc_frame[1] = hsc.sc_args[1] = NULL;
1511
1512	/*
1513	 * Allocate space for the signal handler context.
1514	 */
1515	if ((psp->ps_flags & SAS_ALTSTACK) && !hsc.sc_onstack &&
1516	    (psp->ps_sigonstack & sigmask(sig))) {
1517		scp = (register_t)psp->ps_sigstk.ss_sp;
1518		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
1519	} else
1520		scp = (tf->tf_sp + 63) & ~63;
1521
1522	sss = (sizeof(hsc) + 63) & ~63;
	/* scp is only known at this point; fill in the third argument slot */
	hsc.sc_frame[2] = hsc.sc_args[2] = scp;
1523
1524	if (tf->tf_flags & TFF_SYS) {
1525		hsc.sc_tfflags = HPUX_TFF_SYSCALL;
1526		hsc.sc_syscall = tf->tf_t1;
1527	} else if (tf->tf_flags & TFF_INTR)
1528		hsc.sc_tfflags = HPUX_TFF_INTR;
1529	else
1530		hsc.sc_tfflags = HPUX_TFF_TRAP;
1531
1532	hsc.sc_regs[0] = tf->tf_r1;
1533	hsc.sc_regs[1] = tf->tf_rp;
1534	hsc.sc_regs[2] = tf->tf_r3;
1535	hsc.sc_regs[3] = tf->tf_r4;
1536	hsc.sc_regs[4] = tf->tf_r5;
1537	hsc.sc_regs[5] = tf->tf_r6;
1538	hsc.sc_regs[6] = tf->tf_r7;
1539	hsc.sc_regs[7] = tf->tf_r8;
1540	hsc.sc_regs[8] = tf->tf_r9;
1541	hsc.sc_regs[9] = tf->tf_r10;
1542	hsc.sc_regs[10] = tf->tf_r11;
1543	hsc.sc_regs[11] = tf->tf_r12;
1544	hsc.sc_regs[12] = tf->tf_r13;
1545	hsc.sc_regs[13] = tf->tf_r14;
1546	hsc.sc_regs[14] = tf->tf_r15;
1547	hsc.sc_regs[15] = tf->tf_r16;
1548	hsc.sc_regs[16] = tf->tf_r17;
1549	hsc.sc_regs[17] = tf->tf_r18;
1550	hsc.sc_regs[18] = tf->tf_t4;
1551	hsc.sc_regs[19] = tf->tf_t3;
1552	hsc.sc_regs[20] = tf->tf_t2;
1553	hsc.sc_regs[21] = tf->tf_t1;
1554	hsc.sc_regs[22] = tf->tf_arg3;
1555	hsc.sc_regs[23] = tf->tf_arg2;
1556	hsc.sc_regs[24] = tf->tf_arg1;
1557	hsc.sc_regs[25] = tf->tf_arg0;
1558	hsc.sc_regs[26] = tf->tf_dp;
1559	hsc.sc_regs[27] = tf->tf_ret0;
1560	hsc.sc_regs[28] = tf->tf_ret1;
1561	hsc.sc_regs[29] = tf->tf_sp;
1562	hsc.sc_regs[30] = tf->tf_r31;
1563	hsc.sc_regs[31] = tf->tf_sar;
1564	hsc.sc_regs[32] = tf->tf_iioq_head;
1565	hsc.sc_regs[33] = tf->tf_iisq_head;
1566	hsc.sc_regs[34] = tf->tf_iioq_tail;
1567	hsc.sc_regs[35] = tf->tf_iisq_tail;
1568	hsc.sc_regs[35] = tf->tf_eiem;
1569	hsc.sc_regs[36] = tf->tf_iir;
1570	hsc.sc_regs[37] = tf->tf_isr;
1571	hsc.sc_regs[38] = tf->tf_ior;
1572	hsc.sc_regs[39] = tf->tf_ipsw;
1573	hsc.sc_regs[40] = 0;
1574	hsc.sc_regs[41] = tf->tf_sr4;
1575	hsc.sc_regs[42] = tf->tf_sr0;
1576	hsc.sc_regs[43] = tf->tf_sr1;
1577	hsc.sc_regs[44] = tf->tf_sr2;
1578	hsc.sc_regs[45] = tf->tf_sr3;
1579	hsc.sc_regs[46] = tf->tf_sr5;
1580	hsc.sc_regs[47] = tf->tf_sr6;
1581	hsc.sc_regs[48] = tf->tf_sr7;
1582	hsc.sc_regs[49] = tf->tf_rctr;
1583	hsc.sc_regs[50] = tf->tf_pidr1;
1584	hsc.sc_regs[51] = tf->tf_pidr2;
1585	hsc.sc_regs[52] = tf->tf_ccr;
1586	hsc.sc_regs[53] = tf->tf_pidr3;
1587	hsc.sc_regs[54] = tf->tf_pidr4;
1588	/* hsc.sc_regs[55] = tf->tf_cr24; */
1589	hsc.sc_regs[56] = tf->tf_vtop;
1590	/* hsc.sc_regs[57] = tf->tf_cr26; */
1591	/* hsc.sc_regs[58] = tf->tf_cr27; */
1592	hsc.sc_regs[59] = 0;
1593	hsc.sc_regs[60] = 0;
1594	bcopy(p->p_addr->u_pcb.pcb_fpregs, hsc.sc_fpregs,
1595	    sizeof(hsc.sc_fpregs));
1596
1597	tf->tf_rp = (register_t)pcb->pcb_sigreturn;
1598	tf->tf_arg3 = (register_t)catcher;
1599	tf->tf_sp = scp + sss;
1600	tf->tf_ipsw &= ~(PSL_N|PSL_B);
1601	tf->tf_iioq_head = HPPA_PC_PRIV_USER | p->p_sigcode;
1602	tf->tf_iioq_tail = tf->tf_iioq_head + 4;
1603
1604	if (copyout(&hsc, (void *)scp, sizeof(hsc)))
1605		sigexit(p, SIGILL);
1606
1607#ifdef DEBUG
1608	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1609		printf("sendsig(%d): pc 0x%x rp 0x%x\n", p->p_pid,
1610		    tf->tf_iioq_head, tf->tf_rp);
1611#endif
1612}
1613#endif
1614
1615/*
1616 * machine dependent system variables.
1617 */
1618int
1619cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1620	int *name;
1621	u_int namelen;
1622	void *oldp;
1623	size_t *oldlenp;
1624	void *newp;
1625	size_t newlen;
1626	struct proc *p;
1627{
1628	extern paddr_t fpu_curpcb;	/* from locore.S */
1629	extern u_int fpu_enable;
1630	extern int cpu_fpuena;
1631	dev_t consdev;
1632	int oldval, ret;
1633
1634	/* all sysctl names at this level are terminal */
1635	if (namelen != 1)
1636		return (ENOTDIR);	/* overloaded */
1637	switch (name[0]) {
1638	case CPU_CONSDEV:
1639		if (cn_tab != NULL)
1640			consdev = cn_tab->cn_dev;
1641		else
1642			consdev = NODEV;
1643		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1644		    sizeof consdev));
1645	case CPU_FPU:
1646		if (fpu_curpcb) {
1647			mtctl(fpu_enable, CR_CCR);
1648			fpu_save(fpu_curpcb);
1649			fpu_curpcb = 0;
1650			mtctl(0, CR_CCR);
1651		}
1652		return (sysctl_int(oldp, oldlenp, newp, newlen, &cpu_fpuena));
1653	case CPU_LED_BLINK:
1654		oldval = led_blink;
1655		ret = sysctl_int(oldp, oldlenp, newp, newlen, &led_blink);
1656		/*
1657		 * If we were false and are now true, start the timer.
1658		 */
1659		if (!oldval && led_blink > oldval)
1660			blink_led_timeout(NULL);
1661		return (ret);
1662	default:
1663		return (EOPNOTSUPP);
1664	}
1665	/* NOTREACHED */
1666}
1667
1668
1669/*
1670 * consinit:
1671 * initialize the system console.
1672 */
1673void
1674consinit(void)
1675{
1676	/*
1677	 * Initial console setup has been done in pdc_init().
1678	 */
1679}
1680
1681
1682struct blink_led_softc {
1683	SLIST_HEAD(, blink_led) bls_head;
1684	int bls_on;
1685	struct timeout bls_to;
1686} blink_sc = { SLIST_HEAD_INITIALIZER(bls_head), 0 };
1687
1688void
1689blink_led_register(struct blink_led *l)
1690{
1691	if (SLIST_EMPTY(&blink_sc.bls_head)) {
1692		timeout_set(&blink_sc.bls_to, blink_led_timeout, &blink_sc);
1693		blink_sc.bls_on = 0;
1694		if (led_blink)
1695			timeout_add(&blink_sc.bls_to, 1);
1696	}
1697	SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next);
1698}
1699
1700void
1701blink_led_timeout(void *vsc)
1702{
1703	struct blink_led_softc *sc = &blink_sc;
1704	struct blink_led *l;
1705	int t;
1706
1707	if (SLIST_EMPTY(&sc->bls_head))
1708		return;
1709
1710	SLIST_FOREACH(l, &sc->bls_head, bl_next) {
1711		(*l->bl_func)(l->bl_arg, sc->bls_on);
1712	}
1713	sc->bls_on = !sc->bls_on;
1714
1715	if (!led_blink)
1716		return;
1717
1718	/*
1719	 * Blink rate is:
1720	 *      full cycle every second if completely idle (loadav = 0)
1721	 *      full cycle every 2 seconds if loadav = 1
1722	 *      full cycle every 3 seconds if loadav = 2
1723	 * etc.
1724	 */
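	/*
	 * ldavg[] is fixed point with FSCALE == 1 << FSHIFT, so the
	 * expression below is roughly (loadav + 1) * hz / 2 ticks: the
	 * half-period that yields the full cycles described above.
	 */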
1725	t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1));
1726	timeout_add(&sc->bls_to, t);
1727}
1728