machdep.c revision 1.172
1/*	$OpenBSD: machdep.c,v 1.172 2009/06/03 21:30:19 beck Exp $	*/
2
3/*
4 * Copyright (c) 1999-2003 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/param.h>
30#include <sys/systm.h>
31#include <sys/signalvar.h>
32#include <sys/kernel.h>
33#include <sys/proc.h>
34#include <sys/buf.h>
35#include <sys/reboot.h>
36#include <sys/device.h>
37#include <sys/conf.h>
38#include <sys/file.h>
39#include <sys/timeout.h>
40#include <sys/malloc.h>
41#include <sys/mbuf.h>
42#include <sys/msgbuf.h>
43#include <sys/ioctl.h>
44#include <sys/tty.h>
45#include <sys/user.h>
46#include <sys/exec.h>
47#include <sys/sysctl.h>
48#include <sys/core.h>
49#include <sys/kcore.h>
50#include <sys/extent.h>
51#ifdef SYSVMSG
52#include <sys/msg.h>
53#endif
54
55#include <sys/mount.h>
56#include <sys/syscallargs.h>
57
58#include <uvm/uvm.h>
59#include <uvm/uvm_page.h>
60#include <uvm/uvm_swap.h>
61
62#include <dev/cons.h>
63
64#include <machine/pdc.h>
65#include <machine/iomod.h>
66#include <machine/psl.h>
67#include <machine/reg.h>
68#include <machine/cpufunc.h>
69#include <machine/autoconf.h>
70#include <machine/kcore.h>
71
72#ifdef COMPAT_HPUX
73#include <compat/hpux/hpux.h>
74#include <compat/hpux/hpux_sig.h>
75#include <compat/hpux/hpux_util.h>
76#include <compat/hpux/hpux_syscallargs.h>
77#include <machine/hpux_machdep.h>
78#endif
79
80#ifdef DDB
81#include <machine/db_machdep.h>
82#include <ddb/db_access.h>
83#include <ddb/db_sym.h>
84#include <ddb/db_extern.h>
85#endif
86
87#include <hppa/dev/cpudevs.h>
88
89/*
90 * Patchable buffer cache parameters
91 */
92#ifndef BUFCACHEPERCENT
93#define BUFCACHEPERCENT 10
94#endif /* BUFCACHEPERCENT */
95
96#ifdef BUFPAGES
97int bufpages = BUFPAGES;
98#else
99int bufpages = 0;
100#endif
101int bufcachepercent = BUFCACHEPERCENT;
102
103/*
104 * Different kinds of flags used throughout the kernel.
105 */
int cold = 1;			/* cleared once the system is up and running */
107extern int msgbufmapped;	/* set when safe to use msgbuf */
108
/*
 * cache configuration; most machines use the same numbers,
 * so it makes sense to define them depending on the cpu types
 * configured into the kernel
 */
114int icache_stride, icache_line_mask;
115int dcache_stride, dcache_line_mask;
116
117/*
118 * things to not kill
119 */
120volatile u_int8_t *machine_ledaddr;
121int machine_ledword, machine_leds;
122struct cpu_info cpu_info_primary;
123
124/*
125 * CPU params (should be the same for all cpus in the system)
126 */
127struct pdc_cache pdc_cache PDC_ALIGNMENT;
128struct pdc_btlb pdc_btlb PDC_ALIGNMENT;
129struct pdc_model pdc_model PDC_ALIGNMENT;
130
131	/* w/ a little deviation should be the same for all installed cpus */
132u_int	cpu_ticksnum, cpu_ticksdenom;
133
134	/* exported info */
135char	machine[] = MACHINE;
136char	cpu_model[128];
137enum hppa_cpu_type cpu_type;
138const char *cpu_typename;
139int	cpu_hvers;
140u_int	fpu_version;
141#ifdef COMPAT_HPUX
142int	cpu_model_hpux;	/* contains HPUX_SYSCONF_CPU* kind of value */
143#endif
144
145int	led_blink;
146
147/*
148 * exported methods for cpus
149 */
150int (*cpu_desidhash)(void);
151int (*cpu_hpt_init)(vaddr_t hpt, vsize_t hptsize);
152int (*cpu_ibtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
153	    vsize_t sz, u_int prot);
154int (*cpu_dbtlb_ins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
155	    vsize_t sz, u_int prot);
156
157dev_t	bootdev;
158int	physmem, resvmem, resvphysmem, esym;
159paddr_t	avail_end;
160
161/*
162 * Things for MI glue to stick on.
163 */
164struct user *proc0paddr;
165long mem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)];
166struct extent *hppa_ex;
167
168struct vm_map *exec_map = NULL;
169struct vm_map *phys_map = NULL;
170/* Virtual page frame for /dev/mem (see mem.c) */
171vaddr_t vmmap;
172
173void delay_init(void);
174static __inline void fall(int, int, int, int, int);
175void dumpsys(void);
176void hpmc_dump(void);
177void cpuid(void);
178void blink_led_timeout(void *);
179
/*
 * widely used hardware params
 */
183struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
184struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
185struct pdc_coherence pdc_coherence PDC_ALIGNMENT;
186struct pdc_spidb pdc_spidbits PDC_ALIGNMENT;
188
189#ifdef DEBUG
190int sigdebug = 0;
191pid_t sigpid = 0;
192#define SDB_FOLLOW	0x01
193#endif
194
195/*
196 * Whatever CPU types we support
197 */
198extern const u_int itlb_x[], itlbna_x[], dtlb_x[], dtlbna_x[], tlbd_x[];
199extern const u_int itlb_s[], itlbna_s[], dtlb_s[], dtlbna_s[], tlbd_s[];
200extern const u_int itlb_t[], itlbna_t[], dtlb_t[], dtlbna_t[], tlbd_t[];
201extern const u_int itlb_l[], itlbna_l[], dtlb_l[], dtlbna_l[], tlbd_l[];
202extern const u_int itlb_u[], itlbna_u[], dtlb_u[], dtlbna_u[], tlbd_u[];
203int iibtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
204    vsize_t sz, u_int prot);
205int idbtlb_s(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
206    vsize_t sz, u_int prot);
207int ibtlb_t(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
208    vsize_t sz, u_int prot);
209int ibtlb_l(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
210    vsize_t sz, u_int prot);
211int ibtlb_u(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
212    vsize_t sz, u_int prot);
213int ibtlb_g(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
214    vsize_t sz, u_int prot);
215int pbtlb_g(int i);
216int pbtlb_u(int i);
217int hpti_l(vaddr_t, vsize_t);
218int hpti_u(vaddr_t, vsize_t);
219int hpti_g(vaddr_t, vsize_t);
220int desidhash_x(void);
221int desidhash_s(void);
222int desidhash_t(void);
223int desidhash_l(void);
224int desidhash_u(void);
225const struct hppa_cpu_typed {
226	char name[8];
227	enum hppa_cpu_type type;
228	int  cpuid;
229	int  features;
230	int  patch;
231	int  (*desidhash)(void);
232	int  (*dbtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
233	     vsize_t sz, u_int prot);
234	int  (*ibtlbins)(int i, pa_space_t sp, vaddr_t va, paddr_t pa,
235	     vsize_t sz, u_int prot);
236	int  (*btlbprg)(int i);
237	int  (*hptinit)(vaddr_t hpt, vsize_t hptsize);
238} cpu_types[] = {
239#ifdef HP7000_CPU
240	{ "PCXS",  hpcxs,  0, 0, 3, desidhash_s, ibtlb_g, NULL, pbtlb_g},
241#endif
242#ifdef HP7100_CPU
243	{ "PCXT",  hpcxt, 0, HPPA_FTRS_BTLBU,
244	  2, desidhash_t, ibtlb_g, NULL, pbtlb_g},
245#endif
246#ifdef HP7200_CPU
247	{ "PCXT'", hpcxta,HPPA_CPU_PCXT2, HPPA_FTRS_BTLBU,
248	  2, desidhash_t, ibtlb_g, NULL, pbtlb_g},
249#endif
250#ifdef HP7100LC_CPU
251	{ "PCXL",  hpcxl, HPPA_CPU_PCXL, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
252	  0, desidhash_l, ibtlb_g, NULL, pbtlb_g, hpti_g},
253#endif
254#ifdef HP7300LC_CPU
255	{ "PCXL2", hpcxl2,HPPA_CPU_PCXL2, HPPA_FTRS_BTLBU|HPPA_FTRS_HVT,
256	  0, desidhash_l, ibtlb_g, NULL, pbtlb_g, hpti_g},
257#endif
258#ifdef HP8000_CPU
259	{ "PCXU",  hpcxu, HPPA_CPU_PCXU, HPPA_FTRS_W32B,
260	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
261#endif
262#ifdef HP8200_CPU
263	{ "PCXU+", hpcxu2,HPPA_CPU_PCXUP, HPPA_FTRS_W32B,
264	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
265#endif
266#ifdef HP8500_CPU
267	{ "PCXW",  hpcxw, HPPA_CPU_PCXW, HPPA_FTRS_W32B,
268	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
269#endif
270#ifdef HP8700_CPU
271	{ "PCXW2",  hpcxw, HPPA_CPU_PCXW2, HPPA_FTRS_W32B,
272	  4, desidhash_u, ibtlb_u, NULL, pbtlb_u },
273#endif
274	{ "", 0 }
275};
276
277int
278hppa_cpuspeed(int *mhz)
279{
280	*mhz = PAGE0->mem_10msec / 10000;
281
282	return (0);
283}
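
/*
 * Illustrative example (not from the original source, hypothetical
 * numbers): PAGE0->mem_10msec holds the number of interval timer (ITMR)
 * ticks in 10ms, and the ITMR is assumed to tick once per cpu cycle, so
 * dividing by 10000 yields MHz.  An 80MHz machine would report
 * mem_10msec == 800000, giving *mhz == 80.
 */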
284
285void
286hppa_init(start)
287	paddr_t start;
288{
289	extern u_long cpu_hzticks;
290	extern int kernel_text;
291	vaddr_t v, v1;
292	int error;
293
	pdc_init();	/* init the PDC interface, so we can call it easily */
295
296	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
297	delay_init();	/* calculate cpu clock ratio */
298
299	/* cache parameters */
300	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
301	    &pdc_cache)) < 0) {
302#ifdef DEBUG
303		printf("WARNING: PDC_CACHE error %d\n", error);
304#endif
305	}
306
307	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
308	dcache_stride = pdc_cache.dc_stride;
309	icache_line_mask = pdc_cache.ic_conf.cc_line * 16 - 1;
310	icache_stride = pdc_cache.ic_stride;
311
	/* cache coherence params (probably available for 8k only) */
313	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_SETCS,
314	    &pdc_coherence, 1, 1, 1, 1);
315#ifdef DEBUG
316	printf ("PDC_CACHE_SETCS: %d, %d, %d, %d (%d)\n",
317	    pdc_coherence.ia_cst, pdc_coherence.da_cst,
318	    pdc_coherence.ita_cst, pdc_coherence.dta_cst, error);
319#endif
320	error = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_GETSPIDB,
321	    &pdc_spidbits, 0, 0, 0, 0);
322	printf("SPID bits: 0x%x, error = %d\n", pdc_spidbits.spidbits, error);
323
324	/* setup hpmc handler */
325	{
326		extern u_int hpmc_v[];	/* from locore.s */
327		register u_int *p = hpmc_v;
328
329		if (pdc_call((iodcio_t)pdc, 0, PDC_INSTR, PDC_INSTR_DFLT, p))
330			*p = 0x08000240;
331
332		p[6] = (u_int)&hpmc_dump;
333		p[7] = 32;
334		p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);
335	}
336
337	{
338		extern u_int hppa_toc[], hppa_toc_end[];
339		register u_int cksum, *p;
340
341		for (cksum = 0, p = hppa_toc; p < hppa_toc_end; p++)
342			cksum += *p;
343
344		*p = cksum;
345		PAGE0->ivec_toc = (u_int)hppa_toc;
346		PAGE0->ivec_toclen = (hppa_toc_end - hppa_toc + 1) * 4;
347	}
348
349	{
350		extern u_int hppa_pfr[], hppa_pfr_end[];
351		register u_int cksum, *p;
352
353		for (cksum = 0, p = hppa_pfr; p < hppa_pfr_end; p++)
354			cksum += *p;
355
356		*p = cksum;
357		PAGE0->ivec_mempf = (u_int)hppa_pfr;
358		PAGE0->ivec_mempflen = (hppa_pfr_end - hppa_pfr + 1) * 4;
359	}
360
361	cpuid();
362	ptlball();
363	ficacheall();
364	fdcacheall();
365
366	avail_end = trunc_page(PAGE0->imm_max_mem);
367	/*
368	 * XXX For some reason, using any physical memory above the
369	 * 2GB marker causes memory corruption on PA-RISC 2.0
370	 * machines.  Cap physical memory at 2GB for now.
371	 */
372#if 0
373	if (avail_end > SYSCALLGATE)
374		avail_end = SYSCALLGATE;
375#else
376	if (avail_end > 0x80000000)
377		avail_end = 0x80000000;
378#endif
379	physmem = atop(avail_end);
380	resvmem = atop(((vaddr_t)&kernel_text));
381
382	/* we hope this won't fail */
383	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
384	    (caddr_t)mem_ex_storage, sizeof(mem_ex_storage),
385	    EX_NOCOALESCE|EX_NOWAIT);
386	if (extent_alloc_region(hppa_ex, 0, (vaddr_t)PAGE0->imm_max_mem,
387	    EX_NOWAIT))
388		panic("cannot reserve main memory");
389
390	/*
391	 * Now allocate kernel dynamic variables
392	 */
393
394	v1 = v = round_page(start);
395#define valloc(name, type, num) (name) = (type *)v; v = (vaddr_t)((name)+(num))
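/*
 * Illustrative expansion (not from the original source): each valloc()
 * carves `num' objects of `type' out of the region starting at `v' and
 * advances `v' past them.  For instance,
 *	valloc(msqids, struct msqid_ds, msginfo.msgmni);
 * expands to
 *	(msqids) = (struct msqid_ds *)v; v = (vaddr_t)((msqids)+(msginfo.msgmni));
 */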
396
397#ifdef SYSVMSG
398	valloc(msgpool, char, msginfo.msgmax);
399	valloc(msgmaps, struct msgmap, msginfo.msgseg);
400	valloc(msghdrs, struct msg, msginfo.msgtql);
401	valloc(msqids, struct msqid_ds, msginfo.msgmni);
402#endif
403#undef valloc
404	v = round_page(v);
405	bzero ((void *)v1, (v - v1));
406
407	/* sets resvphysmem */
408	pmap_bootstrap(v);
409
410	/* space has been reserved in pmap_bootstrap() */
411	initmsgbuf((caddr_t)(ptoa(physmem) - round_page(MSGBUFSIZE)),
412	    round_page(MSGBUFSIZE));
413
414	/* they say PDC_COPROC might turn fault light on */
415	pdc_call((iodcio_t)pdc, 0, PDC_CHASSIS, PDC_CHASSIS_DISP,
416	    PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0);
417
418	cpu_cpuspeed = &hppa_cpuspeed;
419#ifdef DDB
420	ddb_init();
421#endif
422	ficacheall();
423	fdcacheall();
424}
425
426void
427cpuid()
428{
429	/*
430	 * Ptrs to various tlb handlers, to be filled
431	 * based on cpu features.
432	 * from locore.S
433	 */
434	extern u_int trap_ep_T_TLB_DIRTY[];
435	extern u_int trap_ep_T_DTLBMISS[];
436	extern u_int trap_ep_T_DTLBMISSNA[];
437	extern u_int trap_ep_T_ITLBMISS[];
438	extern u_int trap_ep_T_ITLBMISSNA[];
439
440	extern u_int fpu_enable;
441	extern int cpu_fpuena;
442	struct pdc_cpuid pdc_cpuid PDC_ALIGNMENT;
443	const struct hppa_cpu_typed *p = NULL;
444	u_int cpu_features;
445	int error;
446
447	/* may the scientific guessing begin */
448	cpu_features = 0;
449	cpu_type = 0;
450
451	/* identify system type */
452	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_INFO,
453	    &pdc_model)) < 0) {
454#ifdef DEBUG
455		printf("WARNING: PDC_MODEL error %d\n", error);
456#endif
457		pdc_model.hvers = 0;
458	}
459
460	bzero(&pdc_cpuid, sizeof(pdc_cpuid));
461	if (pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_CPUID,
462	    &pdc_cpuid, 0, 0, 0, 0) >= 0) {
463
464		/* patch for old 8200 */
465		if (pdc_cpuid.version == HPPA_CPU_PCXU &&
466		    pdc_cpuid.revision > 0x0d)
467			pdc_cpuid.version = HPPA_CPU_PCXUP;
468
469		cpu_type = pdc_cpuid.version;
470	}
471
472	/* locate coprocessors and SFUs */
473	bzero(&pdc_coproc, sizeof(pdc_coproc));
474	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
475	    &pdc_coproc, 0, 0, 0, 0)) < 0) {
476		printf("WARNING: PDC_COPROC error %d\n", error);
477		cpu_fpuena = 0;
478	} else {
479		printf("pdc_coproc: 0x%x, 0x%x; model %x rev %x\n",
480		    pdc_coproc.ccr_enable, pdc_coproc.ccr_present,
481		    pdc_coproc.fpu_model, pdc_coproc.fpu_revision);
482		fpu_enable = pdc_coproc.ccr_enable & CCR_MASK;
483		cpu_fpuena = 1;
484
485		/* a kludge to detect PCXW */
486		if (pdc_coproc.fpu_model == HPPA_FPU_PCXW)
487			cpu_type = HPPA_CPU_PCXW;
488	}
489
490	/* BTLB params */
491	if (cpu_type < HPPA_CPU_PCXU &&
492	    (error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
493	     PDC_BTLB_DEFAULT, &pdc_btlb)) < 0) {
494#ifdef DEBUG
495		printf("WARNING: PDC_BTLB error %d\n", error);
496#endif
497	} else {
498#ifdef BTLBDEBUG
499		printf("btlb info: minsz=%d, maxsz=%d\n",
500		    pdc_btlb.min_size, pdc_btlb.max_size);
501		printf("btlb fixed: i=%d, d=%d, c=%d\n",
502		    pdc_btlb.finfo.num_i,
503		    pdc_btlb.finfo.num_d,
504		    pdc_btlb.finfo.num_c);
505		printf("btlb varbl: i=%d, d=%d, c=%d\n",
506		    pdc_btlb.vinfo.num_i,
507		    pdc_btlb.vinfo.num_d,
508		    pdc_btlb.vinfo.num_c);
509#endif /* BTLBDEBUG */
510		/* purge TLBs and caches */
511		if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB,
512		    PDC_BTLB_PURGE_ALL) < 0)
513			printf("WARNING: BTLB purge failed\n");
514
515		if (pdc_btlb.finfo.num_c)
516			cpu_features |= HPPA_FTRS_BTLBU;
517	}
518
519	if (!pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
520	    pdc_hwtlb.min_size && pdc_hwtlb.max_size) {
521		cpu_features |= HPPA_FTRS_HVT;
522		if (pmap_hptsize > pdc_hwtlb.max_size)
523			pmap_hptsize = pdc_hwtlb.max_size;
524		else if (pmap_hptsize && pmap_hptsize < pdc_hwtlb.min_size)
525			pmap_hptsize = pdc_hwtlb.min_size;
526	} else {
527#ifdef DEBUG
528		printf("WARNING: no HPT support, fine!\n");
529#endif
530		pmap_hptsize = 0;
531	}
532
533	if (cpu_type)
534		for (p = cpu_types; p->name[0] && p->cpuid != cpu_type; p++);
535	else
536		for (p = cpu_types;
537		    p->name[0] && p->features != cpu_features; p++);
538
539	if (!p->name[0]) {
540		printf("WARNING: UNKNOWN CPU TYPE; GOOD LUCK "
541		    "(type 0x%x, features 0x%x)\n", cpu_type, cpu_features);
542		p = cpu_types;
543	} else if ((p->type == hpcxl || p->type == hpcxl2) && !fpu_enable) {
544		/* we know PCXL and PCXL2 do not exist w/o FPU */
545		fpu_enable = 0xc0;
546		cpu_fpuena = 1;
547	}
548
549	/*
550	 * TODO: HPT on 7200 is not currently supported
551	 */
552	if (pmap_hptsize && p->type != hpcxl && p->type != hpcxl2)
553		pmap_hptsize = 0;
554
555	cpu_type = p->type;
556	cpu_typename = p->name;
557	cpu_ibtlb_ins = p->ibtlbins;
558	cpu_dbtlb_ins = p->dbtlbins;
559	cpu_hpt_init = p->hptinit;
560	cpu_desidhash = p->desidhash;
561
562	/* patch tlb handler branches */
563	if (p->patch) {
564		trap_ep_T_TLB_DIRTY [0] = trap_ep_T_TLB_DIRTY [p->patch];
565		trap_ep_T_DTLBMISS  [0] = trap_ep_T_DTLBMISS  [p->patch];
566		trap_ep_T_DTLBMISSNA[0] = trap_ep_T_DTLBMISSNA[p->patch];
567		trap_ep_T_ITLBMISS  [0] = trap_ep_T_ITLBMISS  [p->patch];
568		trap_ep_T_ITLBMISSNA[0] = trap_ep_T_ITLBMISSNA[p->patch];
569	}
570
571	/* force strong ordering for now */
572	if (p->features & HPPA_FTRS_W32B) {
573		kpsw |= PSL_O;
574	}
575
576	{
577		const char *p, *q;
578		char buf[32];
579		int lev;
580
581		lev = 0xa + (*cpu_desidhash)();
582		cpu_hvers = pdc_model.hvers >> 4;
583		if (!cpu_hvers) {
584			p = "(UNKNOWN)";
585			q = lev == 0xa? "1.0" : "1.1";
586		} else {
587			p = hppa_mod_info(HPPA_TYPE_BOARD, cpu_hvers);
588			if (!p) {
589				snprintf(buf, sizeof buf, "(UNKNOWN 0x%x)",
590				    cpu_hvers);
591				p = buf;
592			}
593
594			switch (pdc_model.arch_rev) {
595			default:
596			case 0:
597				q = "1.0";
598#ifdef COMPAT_HPUX
599				cpu_model_hpux = HPUX_SYSCONF_CPUPA10;
600#endif
601				break;
602			case 4:
603				q = "1.1";
604#ifdef COMPAT_HPUX
605				cpu_model_hpux = HPUX_SYSCONF_CPUPA11;
606#endif
607				/* this one is just a 100MHz pcxl */
608				if (lev == 0x10)
609					lev = 0xc;
610				/* this one is a pcxl2 */
611				if (lev == 0x16)
612					lev = 0xe;
613				break;
614			case 8:
615				q = "2.0";
616#ifdef COMPAT_HPUX
617				cpu_model_hpux = HPUX_SYSCONF_CPUPA20;
618#endif
619				break;
620			}
621		}
622
623		snprintf(cpu_model, sizeof cpu_model,
624		    "HP 9000/%s PA-RISC %s%x", p, q, lev);
625	}
626#ifdef DEBUG
627	printf("cpu: %s\n", cpu_model);
628#endif
629}
630
631void
632cpu_startup(void)
633{
634	vaddr_t minaddr, maxaddr;
635
636	/*
637	 * i won't understand a friend of mine,
638	 * who sat in a room full of artificial ice,
639	 * fogging the air w/ humid cries --
640	 *	WELCOME TO SUMMER!
641	 */
642	printf(version);
643
644	printf("%s\n", cpu_model);
645	printf("real mem = %u (%uMB)\n", ptoa(physmem),
646	    ptoa(physmem) / 1024 / 1024);
647	printf("rsvd mem = %u (%uKB)\n", ptoa(resvmem), ptoa(resvmem) / 1024);
648
649	/*
650	 * Determine how many buffers to allocate.
651	 * We allocate bufcachepercent% of memory for buffer space.
652	 */
653	if (bufpages == 0)
654		bufpages = physmem * bufcachepercent / 100;
655
656	/*
657	 * Allocate a submap for exec arguments.  This map effectively
658	 * limits the number of processes exec'ing at any time.
659	 */
660	minaddr = vm_map_min(kernel_map);
661	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
662	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
663
664	/*
665	 * Allocate a submap for physio
666	 */
667	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
668	    VM_PHYS_SIZE, 0, FALSE, NULL);
669
670	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
671	    ptoa(uvmexp.free) / 1024 / 1024);
672
673	/*
674	 * Set up buffers, so they can be used to read disk labels.
675	 */
676	bufinit();
677	vmmap = uvm_km_valloc_wait(kernel_map, NBPG);
678
679	/*
680	 * Configure the system.
681	 */
682	if (boothowto & RB_CONFIG) {
683#ifdef BOOT_CONFIG
684		user_config();
685#else
686		printf("kernel does not support -c; continuing..\n");
687#endif
688	}
689}
690
/*
 * compute the cpu clock ratio such that
 *	cpu_ticksnum / cpu_ticksdenom = t + delta
 * where t is the number of ITMR ticks per microsecond and delta -> 0
 */
696void
697delay_init(void)
698{
699	register u_int num, denom, delta, mdelta;
700
701	mdelta = UINT_MAX;
702	for (denom = 1; denom < 1000; denom++) {
703		num = (PAGE0->mem_10msec * denom) / 10000;
704		delta = num * 10000 / denom - PAGE0->mem_10msec;
705		if (!delta) {
706			cpu_ticksdenom = denom;
707			cpu_ticksnum = num;
708			break;
709		} else if (delta < mdelta) {
710			cpu_ticksdenom = denom;
711			cpu_ticksnum = num;
712			mdelta = delta;
713		}
714	}
715}
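
/*
 * Worked example (not from the original source, hypothetical numbers):
 * with PAGE0->mem_10msec == 800000 (an 80MHz clock) the first iteration
 * is already exact, giving cpu_ticksnum / cpu_ticksdenom == 80 / 1,
 * i.e. 80 ITMR ticks per microsecond; odd clock rates settle on the
 * closest ratio found with denom < 1000.
 */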
716
717void
718delay(us)
719	u_int us;
720{
721	register u_int start, end, n;
722
723	mfctl(CR_ITMR, start);
724	while (us) {
725		n = min(1000, us);
726		end = start + n * cpu_ticksnum / cpu_ticksdenom;
727
728		/* N.B. Interval Timer may wrap around */
729		if (end < start)
730			do
731				mfctl(CR_ITMR, start);
732			while (start > end);
733
734		do
735			mfctl(CR_ITMR, start);
736		while (start < end);
737
738		us -= n;
739	}
740}
741
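/*
 * Flush one cache: walk c_count sets, c_stride bytes apart starting at
 * c_base, issuing c_loop flush operations per set; a non-zero `data'
 * selects the data cache (fdce) instead of the instruction cache (fice).
 */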
742static __inline void
743fall(c_base, c_count, c_loop, c_stride, data)
744	int c_base, c_count, c_loop, c_stride, data;
745{
746	register int loop;
747
748	for (; c_count--; c_base += c_stride)
749		for (loop = c_loop; loop--; )
750			if (data)
751				fdce(0, c_base);
752			else
753				fice(0, c_base);
754}
755
756void
757ficacheall(void)
758{
	/*
	 * Flush the entire instruction cache; fdcacheall() below handles
	 * the data cache.
	 */
762	fall(pdc_cache.ic_base, pdc_cache.ic_count, pdc_cache.ic_loop,
763	    pdc_cache.ic_stride, 0);
764	sync_caches();
765}
766
767void
768fdcacheall(void)
769{
770	fall(pdc_cache.dc_base, pdc_cache.dc_count, pdc_cache.dc_loop,
771	    pdc_cache.dc_stride, 1);
772	sync_caches();
773}
774
775void
776ptlball(void)
777{
778	register pa_space_t sp;
779	register int i, j, k;
780
781	/* instruction TLB */
782	sp = pdc_cache.it_sp_base;
783	for (i = 0; i < pdc_cache.it_sp_count; i++) {
784		register vaddr_t off = pdc_cache.it_off_base;
785		for (j = 0; j < pdc_cache.it_off_count; j++) {
786			for (k = 0; k < pdc_cache.it_loop; k++)
787				pitlbe(sp, off);
788			off += pdc_cache.it_off_stride;
789		}
790		sp += pdc_cache.it_sp_stride;
791	}
792
793	/* data TLB */
794	sp = pdc_cache.dt_sp_base;
795	for (i = 0; i < pdc_cache.dt_sp_count; i++) {
796		register vaddr_t off = pdc_cache.dt_off_base;
797		for (j = 0; j < pdc_cache.dt_off_count; j++) {
798			for (k = 0; k < pdc_cache.dt_loop; k++)
799				pdtlbe(sp, off);
800			off += pdc_cache.dt_off_stride;
801		}
802		sp += pdc_cache.dt_sp_stride;
803	}
804}
805
806int
807hpti_g(hpt, hptsize)
808	vaddr_t hpt;
809	vsize_t hptsize;
810{
811	return pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_CONFIG,
812	    &pdc_hwtlb, hpt, hptsize, PDC_TLB_CURRPDE);
813}
814
815int
816pbtlb_g(i)
817	int i;
818{
819	return -1;
820}
821
822int
823ibtlb_g(i, sp, va, pa, sz, prot)
824	int i;
825	pa_space_t sp;
826	vaddr_t va;
827	paddr_t pa;
828	vsize_t sz;
829	u_int prot;
830{
831	int error;
832
833	if ((error = pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_INSERT,
834	    sp, va, pa, sz, prot, i)) < 0) {
835#ifdef BTLBDEBUG
836		printf("WARNING: BTLB insert failed (%d)\n", error);
837#endif
838	}
839	return error;
840}
841
842int
843btlb_insert(space, va, pa, lenp, prot)
844	pa_space_t space;
845	vaddr_t va;
846	paddr_t pa;
847	vsize_t *lenp;
848	u_int prot;
849{
850	static u_int32_t mask;
851	register vsize_t len;
852	register int error, i, btlb_max;
853
854	if (!pdc_btlb.min_size && !pdc_btlb.max_size)
855		return -(ENXIO);
856
857	/*
858	 * On PCXS processors with split BTLB, we should theoretically
859	 * insert in the IBTLB (if executable mapping requested), and
860	 * into the DBTLB. The PDC documentation is very clear that
861	 * slot numbers are, in order, IBTLB, then DBTLB, then combined
862	 * BTLB.
863	 *
864	 * However it also states that ``successful completion may not mean
865	 * that the entire address range specified in the call has been
866	 * mapped in the block TLB. For both fixed range slots and variable
867	 * range slots, complete coverage of the address range specified
868	 * is not guaranteed. Only a portion of the address range specified
869	 * may get mapped as a result''.
870	 *
871	 * On an HP 9000/720 with PDC ROM v1.2, it turns out that IBTLB
872	 * entries are inserted as expected, but no DBTLB gets inserted
873	 * at all, despite PDC returning success.
874	 *
875	 * So play it dumb, and do not attempt to insert DBTLB entries at
876	 * all on split BTLB systems. Callers are supposed to be able to
877	 * cope with this.
878	 */
879
880	if (pdc_btlb.finfo.num_c == 0) {
881		if ((prot & TLB_EXECUTE) == 0)
882			return -(EINVAL);
883
884		btlb_max = pdc_btlb.finfo.num_i;
885	} else {
886		btlb_max = pdc_btlb.finfo.num_c;
887	}
888
889	/* align size */
890	for (len = pdc_btlb.min_size << PGSHIFT; len < *lenp; len <<= 1);
891	len >>= PGSHIFT;
892	i = ffs(~mask) - 1;
893	if (len > pdc_btlb.max_size || i < 0 || i >= btlb_max) {
894#ifdef BTLBDEBUG
		printf("btlb_insert: too big (%u < %u < %u)\n",
896		    pdc_btlb.min_size, len, pdc_btlb.max_size);
897#endif
898		return -(ENOMEM);
899	}
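	/*
	 * Worked example (not from the original source, hypothetical
	 * numbers): with pdc_btlb.min_size == 16 pages (64KB at 4KB
	 * pages) and a request of *lenp == 300KB, the loop above doubles
	 * 64KB -> 128KB -> 256KB -> 512KB, so len becomes 128 pages and
	 * the physical address must be aligned to that 512KB boundary.
	 */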
900
901	mask |= 1 << i;
902	pa >>= PGSHIFT;
903	va >>= PGSHIFT;
904	/* check address alignment */
905	if (pa & (len - 1)) {
906#ifdef BTLBDEBUG
907		printf("WARNING: BTLB address misaligned pa=0x%x, len=0x%x\n",
908		    pa, len);
909#endif
910		return -(ERANGE);
911	}
912
913	/* ensure IO space is uncached */
914	if ((pa & (HPPA_IOBEGIN >> PGSHIFT)) == (HPPA_IOBEGIN >> PGSHIFT))
915		prot |= TLB_UNCACHABLE;
916
917#ifdef BTLBDEBUG
918	printf("btlb_insert(%d): %x:%x=%x[%x,%x]\n",
919	    i, space, va, pa, len, prot);
920#endif
921	if ((error = (*cpu_dbtlb_ins)(i, space, va, pa, len, prot)) < 0)
922		return -(EINVAL);
923	*lenp = len << PGSHIFT;
924
925	return i;
926}
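
/*
 * Sketch (not from the original source) of how a caller might use
 * btlb_insert(); `space', `va', `pa', `size' and `prot' are hypothetical
 * arguments.  The entry may end up covering more than requested, so the
 * caller reads the length back.
 */
#if 0
static int
example_btlb_map(pa_space_t space, vaddr_t va, paddr_t pa, vsize_t size,
    u_int prot)
{
	vsize_t len = size;
	int slot;

	if ((slot = btlb_insert(space, va, pa, &len, prot)) < 0)
		return (-slot);	/* btlb_insert() returns -errno on failure */

	/* `len' now holds the size actually covered by slot `slot' */
	return (0);
}
#endif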
927
928int waittime = -1;
929
930void
931boot(howto)
932	int howto;
933{
934	/* If system is cold, just halt. */
935	if (cold) {
936		/* (Unless the user explicitly asked for reboot.) */
937		if ((howto & RB_USERREQ) == 0)
938			howto |= RB_HALT;
939	} else {
940
941		boothowto = howto | (boothowto & RB_HALT);
942
943		if (!(howto & RB_NOSYNC)) {
944			waittime = 0;
945			vfs_shutdown();
946			/*
947			 * If we've been adjusting the clock, the todr
948			 * will be out of synch; adjust it now unless
949			 * the system was sitting in ddb.
950			 */
951			if ((howto & RB_TIMEBAD) == 0)
952				resettodr();
953			else
954				printf("WARNING: not updating battery clock\n");
955		}
956
957		/* XXX probably save howto into stable storage */
958
959		uvm_shutdown();
960		splhigh();
961
962		if (howto & RB_DUMP)
963			dumpsys();
964
965		doshutdownhooks();
966	}
967
968	/* in case we came on powerfail interrupt */
969	if (cold_hook)
970		(*cold_hook)(HPPA_COLD_COLD);
971
972	if (howto & RB_HALT) {
973		if (howto & RB_POWERDOWN && cold_hook) {
974			printf("Powering off...");
975			DELAY(2000000);
976			(*cold_hook)(HPPA_COLD_OFF);
977			DELAY(1000000);
978		}
979
980		printf("System halted!\n");
981		DELAY(2000000);
982		__asm __volatile("stwas %0, 0(%1)"
983		    :: "r" (CMD_STOP), "r" (HPPA_LBCAST + iomod_command));
984	} else {
985		printf("rebooting...");
986		DELAY(2000000);
987
988		/* ask firmware to reset */
		pdc_call((iodcio_t)pdc, 0, PDC_BROADCAST_RESET, PDC_DO_RESET);

		/* forcibly reset the module if that fails */
992		__asm __volatile(".export hppa_reset, entry\n\t"
993		    ".label hppa_reset");
994		__asm __volatile("stwas %0, 0(%1)"
995		    :: "r" (CMD_RESET), "r" (HPPA_LBCAST + iomod_command));
996	}
997
	for(;;); /* loop while the bus reset is coming up */
999	/* NOTREACHED */
1000}
1001
1002u_long	dumpmag = 0x8fca0101;	/* magic number */
1003int	dumpsize = 0;		/* pages */
1004long	dumplo = 0;		/* blocks */
1005
1006/*
1007 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1008 */
1009int
1010cpu_dumpsize(void)
1011{
1012	int size;
1013
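	/*
	 * The headers must fit in a single DEV_BSIZE (dbtob(1), i.e.
	 * 512-byte) block, since cpu_dump() below writes exactly one
	 * such block.
	 */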
1014	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
1015	if (roundup(size, dbtob(1)) != dbtob(1))
1016		return -1;
1017
1018	return 1;
1019}
1020
1021/*
1022 * Called from HPMC handler in locore
1023 */
1024void
1025hpmc_dump(void)
1026{
1027	printf("HPMC\n");
1028
1029	cold = 0;
1030	boot(RB_NOSYNC);
1031}
1032
1033int
1034cpu_dump(void)
1035{
1036	long buf[dbtob(1) / sizeof (long)];
1037	kcore_seg_t	*segp;
1038	cpu_kcore_hdr_t	*cpuhdrp;
1039
1040	segp = (kcore_seg_t *)buf;
1041	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];
1042
1043	/*
1044	 * Generate a segment header.
1045	 */
1046	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1047	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
1048
1049	/*
1050	 * Add the machine-dependent header info
1051	 */
1052	/* nothing for now */
1053
1054	return (bdevsw[major(dumpdev)].d_dump)
1055	    (dumpdev, dumplo, (caddr_t)buf, dbtob(1));
1056}
1057
/*
 * Dump physical memory to the dump device (usually the swap partition).
 */
1061#define	BYTES_PER_DUMP	NBPG
1062
1063void
1064dumpsys(void)
1065{
1066	int psize, bytes, i, n;
1067	caddr_t maddr;
1068	daddr64_t blkno;
1069	int (*dump)(dev_t, daddr64_t, caddr_t, size_t);
1070	int error;
1071
1072	/* Save registers
1073	savectx(&dumppcb); */
1074
1075	if (dumpsize == 0)
1076		dumpconf();
1077	if (dumplo <= 0) {
1078		printf("\ndump to dev %x not possible\n", dumpdev);
1079		return;
1080	}
1081	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);
1082
1083#ifdef UVM_SWAP_ENCRYPT
1084	uvm_swap_finicrypt_all();
1085#endif
1086
1087	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1088	printf("dump ");
1089	if (psize == -1) {
1090		printf("area unavailable\n");
1091		return;
1092	}
1093
1094	if (!(error = cpu_dump())) {
1095
1096		bytes = ptoa(physmem);
1097		maddr = NULL;
1098		blkno = dumplo + cpu_dumpsize();
1099		dump = bdevsw[major(dumpdev)].d_dump;
1100		/* TODO block map the whole physical memory */
1101		for (i = 0; i < bytes; i += n) {
1102
			/* Print out how many MB are left to go. */
1104			n = bytes - i;
1105			if (n && (n % (1024*1024)) == 0)
1106				printf("%d ", n / (1024 * 1024));
1107
1108			/* Limit size for next transfer. */
1109
1110			if (n > BYTES_PER_DUMP)
1111				n = BYTES_PER_DUMP;
1112
1113			if ((error = (*dump)(dumpdev, blkno, maddr, n)))
1114				break;
1115			maddr += n;
1116			blkno += btodb(n);
1117		}
1118	}
1119
1120	switch (error) {
1121	case ENXIO:	printf("device bad\n");			break;
1122	case EFAULT:	printf("device not ready\n");		break;
1123	case EINVAL:	printf("area improper\n");		break;
1124	case EIO:	printf("i/o error\n");			break;
1125	case EINTR:	printf("aborted from console\n");	break;
1126	case 0:		printf("succeeded\n");			break;
1127	default:	printf("error %d\n", error);		break;
1128	}
1129}
1130
1131/* bcopy(), error on fault */
1132int
1133kcopy(from, to, size)
1134	const void *from;
1135	void *to;
1136	size_t size;
1137{
1138	return spcopy(HPPA_SID_KERNEL, from, HPPA_SID_KERNEL, to, size);
1139}
1140
1141int
1142copystr(src, dst, size, lenp)
1143	const void *src;
1144	void *dst;
1145	size_t size;
1146	size_t *lenp;
1147{
1148	return spstrcpy(HPPA_SID_KERNEL, src, HPPA_SID_KERNEL, dst, size, lenp);
1149}
1150
1151int
1152copyinstr(src, dst, size, lenp)
1153	const void *src;
1154	void *dst;
1155	size_t size;
1156	size_t *lenp;
1157{
1158	return spstrcpy(curproc->p_addr->u_pcb.pcb_space, src,
1159	    HPPA_SID_KERNEL, dst, size, lenp);
1160}
1161
1162
1163int
1164copyoutstr(src, dst, size, lenp)
1165	const void *src;
1166	void *dst;
1167	size_t size;
1168	size_t *lenp;
1169{
1170	return spstrcpy(HPPA_SID_KERNEL, src,
1171	    curproc->p_addr->u_pcb.pcb_space, dst, size, lenp);
1172}
1173
1174
1175int
1176copyin(src, dst, size)
1177	const void *src;
1178	void *dst;
1179	size_t size;
1180{
1181	return spcopy(curproc->p_addr->u_pcb.pcb_space, src,
1182	    HPPA_SID_KERNEL, dst, size);
1183}
1184
1185int
1186copyout(src, dst, size)
1187	const void *src;
1188	void *dst;
1189	size_t size;
1190{
1191	return spcopy(HPPA_SID_KERNEL, src,
1192	    curproc->p_addr->u_pcb.pcb_space, dst, size);
1193}
1194
1195/*
1196 * Set registers on exec.
1197 */
1198void
1199setregs(p, pack, stack, retval)
1200	struct proc *p;
1201	struct exec_package *pack;
1202	u_long stack;
1203	register_t *retval;
1204{
1205	extern paddr_t fpu_curpcb;	/* from locore.S */
1206	struct trapframe *tf = p->p_md.md_regs;
1207	struct pcb *pcb = &p->p_addr->u_pcb;
1208	register_t zero;
1209
1210	tf->tf_flags = TFF_SYS|TFF_LAST;
1211	tf->tf_iioq_tail = 4 +
1212	    (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER);
1213	tf->tf_rp = 0;
1214	tf->tf_arg0 = (u_long)PS_STRINGS;
1215	tf->tf_arg1 = tf->tf_arg2 = 0; /* XXX dynload stuff */
1216
1217	/* setup terminal stack frame */
1218	stack = (stack + 0x1f) & ~0x1f;
1219	tf->tf_r3 = stack;
1220	tf->tf_sp = stack += HPPA_FRAME_SIZE;
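	/*
	 * The PA-RISC stack grows upward; the two copyout()s below zero
	 * the previous frame pointer and return pointer slots so that
	 * stack tracebacks terminate at this initial frame.
	 */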
1221	zero = 0;
1222	copyout(&zero, (caddr_t)(stack - HPPA_FRAME_SIZE), sizeof(register_t));
1223	copyout(&zero, (caddr_t)(stack + HPPA_FRAME_CRP), sizeof(register_t));
1224
1225	/* reset any of the pending FPU exceptions */
1226	if (tf->tf_cr30 == fpu_curpcb) {
1227		fpu_exit();
1228		fpu_curpcb = 0;
1229	}
1230	pcb->pcb_fpregs[0] = ((u_int64_t)HPPA_FPU_INIT) << 32;
1231	pcb->pcb_fpregs[1] = 0;
1232	pcb->pcb_fpregs[2] = 0;
1233	pcb->pcb_fpregs[3] = 0;
1234	fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb->pcb_fpregs, 8 * 4);
1235
1236	retval[1] = 0;
1237}
1238
1239/*
1240 * Send an interrupt to process.
1241 */
1242void
1243sendsig(catcher, sig, mask, code, type, val)
1244	sig_t catcher;
1245	int sig, mask;
1246	u_long code;
1247	int type;
1248	union sigval val;
1249{
1250	extern paddr_t fpu_curpcb;	/* from locore.S */
1251	extern u_int fpu_enable;
1252	struct proc *p = curproc;
1253	struct trapframe *tf = p->p_md.md_regs;
1254	struct pcb *pcb = &p->p_addr->u_pcb;
1255	struct sigacts *psp = p->p_sigacts;
1256	struct sigcontext ksc;
1257	siginfo_t ksi;
1258	register_t scp, sip;
1259	int sss;
1260
1261#ifdef DEBUG
1262	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1263		printf("sendsig: %s[%d] sig %d catcher %p\n",
1264		    p->p_comm, p->p_pid, sig, catcher);
1265#endif
1266
1267	/* flush the FPU ctx first */
1268	if (tf->tf_cr30 == fpu_curpcb) {
1269		mtctl(fpu_enable, CR_CCR);
1270		fpu_save(fpu_curpcb);
1271		/* fpu_curpcb = 0; only needed if fpregs are preset */
1272		mtctl(0, CR_CCR);
1273	}
1274
1275	ksc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
1276
1277	/*
1278	 * Allocate space for the signal handler context.
1279	 */
1280	if ((psp->ps_flags & SAS_ALTSTACK) && !ksc.sc_onstack &&
1281	    (psp->ps_sigonstack & sigmask(sig))) {
1282		scp = (register_t)psp->ps_sigstk.ss_sp;
1283		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
1284	} else
1285		scp = (tf->tf_sp + 63) & ~63;
1286
1287	sss = (sizeof(ksc) + 63) & ~63;
1288	sip = 0;
1289	if (psp->ps_siginfo & sigmask(sig)) {
1290		sip = scp + sizeof(ksc);
1291		sss += (sizeof(ksi) + 63) & ~63;
1292	}
1293
1294#ifdef DEBUG
1295	if ((tf->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)
1296		printf("sendsig: interrupted syscall at 0x%x:0x%x flags %b\n",
1297		    tf->tf_iioq_head, tf->tf_iioq_tail, tf->tf_ipsw, PSL_BITS);
1298#endif
1299
1300	ksc.sc_mask = mask;
1301	ksc.sc_fp = scp + sss;
1302	ksc.sc_ps = tf->tf_ipsw;
1303	ksc.sc_pcoqh = tf->tf_iioq_head;
1304	ksc.sc_pcoqt = tf->tf_iioq_tail;
1305	ksc.sc_regs[0] = tf->tf_t1;
1306	ksc.sc_regs[1] = tf->tf_t2;
1307	ksc.sc_regs[2] = tf->tf_sp;
1308	ksc.sc_regs[3] = tf->tf_t3;
1309	ksc.sc_regs[4] = tf->tf_sar;
1310	ksc.sc_regs[5] = tf->tf_r1;
1311	ksc.sc_regs[6] = tf->tf_rp;
1312	ksc.sc_regs[7] = tf->tf_r3;
1313	ksc.sc_regs[8] = tf->tf_r4;
1314	ksc.sc_regs[9] = tf->tf_r5;
1315	ksc.sc_regs[10] = tf->tf_r6;
1316	ksc.sc_regs[11] = tf->tf_r7;
1317	ksc.sc_regs[12] = tf->tf_r8;
1318	ksc.sc_regs[13] = tf->tf_r9;
1319	ksc.sc_regs[14] = tf->tf_r10;
1320	ksc.sc_regs[15] = tf->tf_r11;
1321	ksc.sc_regs[16] = tf->tf_r12;
1322	ksc.sc_regs[17] = tf->tf_r13;
1323	ksc.sc_regs[18] = tf->tf_r14;
1324	ksc.sc_regs[19] = tf->tf_r15;
1325	ksc.sc_regs[20] = tf->tf_r16;
1326	ksc.sc_regs[21] = tf->tf_r17;
1327	ksc.sc_regs[22] = tf->tf_r18;
1328	ksc.sc_regs[23] = tf->tf_t4;
1329	ksc.sc_regs[24] = tf->tf_arg3;
1330	ksc.sc_regs[25] = tf->tf_arg2;
1331	ksc.sc_regs[26] = tf->tf_arg1;
1332	ksc.sc_regs[27] = tf->tf_arg0;
1333	ksc.sc_regs[28] = tf->tf_dp;
1334	ksc.sc_regs[29] = tf->tf_ret0;
1335	ksc.sc_regs[30] = tf->tf_ret1;
1336	ksc.sc_regs[31] = tf->tf_r31;
1337	bcopy(p->p_addr->u_pcb.pcb_fpregs, ksc.sc_fpregs,
1338	    sizeof(ksc.sc_fpregs));
1339
1340	sss += HPPA_FRAME_SIZE;
1341	tf->tf_arg0 = sig;
1342	tf->tf_arg1 = sip;
1343	tf->tf_arg2 = tf->tf_r4 = scp;
1344	tf->tf_arg3 = (register_t)catcher;
1345	tf->tf_sp = scp + sss;
1346	tf->tf_ipsw &= ~(PSL_N|PSL_B);
1347	tf->tf_iioq_head = HPPA_PC_PRIV_USER | p->p_sigcode;
1348	tf->tf_iioq_tail = tf->tf_iioq_head + 4;
1349	tf->tf_iisq_tail = tf->tf_iisq_head = pcb->pcb_space;
1350	/* disable tracing in the trapframe */
1351
1352#ifdef DEBUG
1353	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1354		printf("sendsig(%d): sig %d scp %p fp %p sp 0x%x\n",
1355		    p->p_pid, sig, scp, ksc.sc_fp, (register_t)scp + sss);
1356#endif
1357
1358	if (copyout(&ksc, (void *)scp, sizeof(ksc)))
1359		sigexit(p, SIGILL);
1360
1361	if (sip) {
1362		initsiginfo(&ksi, sig, code, type, val);
1363		if (copyout(&ksi, (void *)sip, sizeof(ksi)))
1364			sigexit(p, SIGILL);
1365	}
1366
1367	if (copyout(&tf->tf_r3, (caddr_t)(tf->tf_sp - HPPA_FRAME_SIZE),
1368	    sizeof(register_t)))
1369		sigexit(p, SIGILL);
1370	tf->tf_r3 = tf->tf_sp - HPPA_FRAME_SIZE;
1371
1372#ifdef DEBUG
1373	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1374		printf("sendsig(%d): pc 0x%x catcher 0x%x\n", p->p_pid,
1375		    tf->tf_iioq_head, tf->tf_arg3);
1376#endif
1377}
1378
1379int
1380sys_sigreturn(p, v, retval)
1381	struct proc *p;
1382	void *v;
1383	register_t *retval;
1384{
1385	extern paddr_t fpu_curpcb;	/* from locore.S */
1386	struct sys_sigreturn_args /* {
1387		syscallarg(struct sigcontext *) sigcntxp;
1388	} */ *uap = v;
1389	struct sigcontext *scp, ksc;
1390	struct trapframe *tf = p->p_md.md_regs;
1391	int error;
1392
1393	scp = SCARG(uap, sigcntxp);
1394#ifdef DEBUG
1395	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1396		printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
1397#endif
1398
1399	/* flush the FPU ctx first */
1400	if (tf->tf_cr30 == fpu_curpcb) {
1401		fpu_exit();
1402		fpu_curpcb = 0;
1403	}
1404
1405	if ((error = copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc)))
1406		return (error);
1407
1408#define PSL_MBS (PSL_C|PSL_Q|PSL_P|PSL_D|PSL_I)
1409#define PSL_MBZ (PSL_Y|PSL_Z|PSL_S|PSL_X|PSL_M|PSL_R)
1410	if ((ksc.sc_ps & (PSL_MBS|PSL_MBZ)) != PSL_MBS)
1411		return (EINVAL);
1412
1413	if (ksc.sc_onstack)
1414		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
1415	else
1416		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
1417	p->p_sigmask = ksc.sc_mask &~ sigcantmask;
1418
1419	tf->tf_t1 = ksc.sc_regs[0];		/* r22 */
1420	tf->tf_t2 = ksc.sc_regs[1];		/* r21 */
1421	tf->tf_sp = ksc.sc_regs[2];
1422	tf->tf_t3 = ksc.sc_regs[3];		/* r20 */
1423	tf->tf_sar = ksc.sc_regs[4];
1424	tf->tf_r1 = ksc.sc_regs[5];
1425	tf->tf_rp = ksc.sc_regs[6];
1426	tf->tf_r3 = ksc.sc_regs[7];
1427	tf->tf_r4 = ksc.sc_regs[8];
1428	tf->tf_r5 = ksc.sc_regs[9];
1429	tf->tf_r6 = ksc.sc_regs[10];
1430	tf->tf_r7 = ksc.sc_regs[11];
1431	tf->tf_r8 = ksc.sc_regs[12];
1432	tf->tf_r9 = ksc.sc_regs[13];
1433	tf->tf_r10 = ksc.sc_regs[14];
1434	tf->tf_r11 = ksc.sc_regs[15];
1435	tf->tf_r12 = ksc.sc_regs[16];
1436	tf->tf_r13 = ksc.sc_regs[17];
1437	tf->tf_r14 = ksc.sc_regs[18];
1438	tf->tf_r15 = ksc.sc_regs[19];
1439	tf->tf_r16 = ksc.sc_regs[20];
1440	tf->tf_r17 = ksc.sc_regs[21];
1441	tf->tf_r18 = ksc.sc_regs[22];
1442	tf->tf_t4 = ksc.sc_regs[23];		/* r19 */
1443	tf->tf_arg3 = ksc.sc_regs[24];		/* r23 */
1444	tf->tf_arg2 = ksc.sc_regs[25];		/* r24 */
1445	tf->tf_arg1 = ksc.sc_regs[26];		/* r25 */
1446	tf->tf_arg0 = ksc.sc_regs[27];		/* r26 */
1447	tf->tf_dp = ksc.sc_regs[28];
1448	tf->tf_ret0 = ksc.sc_regs[29];
1449	tf->tf_ret1 = ksc.sc_regs[30];
1450	tf->tf_r31 = ksc.sc_regs[31];
1451	bcopy(ksc.sc_fpregs, p->p_addr->u_pcb.pcb_fpregs,
1452	    sizeof(ksc.sc_fpregs));
1453	fdcache(HPPA_SID_KERNEL, (vaddr_t)p->p_addr->u_pcb.pcb_fpregs,
1454	    sizeof(ksc.sc_fpregs));
1455
1456	tf->tf_iioq_head = ksc.sc_pcoqh | HPPA_PC_PRIV_USER;
1457	tf->tf_iioq_tail = ksc.sc_pcoqt | HPPA_PC_PRIV_USER;
1458	if ((tf->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)
1459		tf->tf_iisq_head = HPPA_SID_KERNEL;
1460	else
1461		tf->tf_iisq_head = p->p_addr->u_pcb.pcb_space;
1462	if ((tf->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
1463		tf->tf_iisq_tail = HPPA_SID_KERNEL;
1464	else
1465		tf->tf_iisq_tail = p->p_addr->u_pcb.pcb_space;
1466	tf->tf_ipsw = ksc.sc_ps | (kpsw & PSL_O);
1467
1468#ifdef DEBUG
1469	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1470		printf("sigreturn(%d): returns\n", p->p_pid);
1471#endif
1472	return (EJUSTRETURN);
1473}
1474
1475#ifdef COMPAT_HPUX
1476void
1477hpux_sendsig(sig_t catcher, int sig, int mask, u_long code, int type,
1478    union sigval val)
1479{
1480	extern paddr_t fpu_curpcb;	/* from locore.S */
1481	extern u_int fpu_enable;
1482	struct proc *p = curproc;
1483	struct pcb *pcb = &p->p_addr->u_pcb;
1484	struct trapframe *tf = p->p_md.md_regs;
1485	struct sigacts *psp = p->p_sigacts;
1486	struct hpux_sigcontext hsc;
1487	int sss;
1488	register_t scp;
1489
1490#ifdef DEBUG
1491	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1492		printf("hpux_sendsig: %s[%d] sig %d catcher %p\n",
1493		    p->p_comm, p->p_pid, sig, catcher);
1494#endif
1495	/* flush the FPU ctx first */
1496	if (tf->tf_cr30 == fpu_curpcb) {
1497		mtctl(fpu_enable, CR_CCR);
1498		fpu_save(fpu_curpcb);
1499		fpu_curpcb = 0;
1500		mtctl(0, CR_CCR);
1501	}
1502
1503	bzero(&hsc, sizeof hsc);
1504	hsc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
1505	hsc.sc_omask = mask;
1506	/* sc_scact ??? */
1507
1508	hsc.sc_ret0 = tf->tf_ret0;
1509	hsc.sc_ret1 = tf->tf_ret1;
1510
	hsc.sc_frame[0] = hsc.sc_args[0] = sig;
	hsc.sc_frame[1] = hsc.sc_args[1] = NULL;
	/* sc_frame[2]/sc_args[2] are set below, once scp has been computed */
1514
1515	/*
1516	 * Allocate space for the signal handler context.
1517	 */
1518	if ((psp->ps_flags & SAS_ALTSTACK) && !hsc.sc_onstack &&
1519	    (psp->ps_sigonstack & sigmask(sig))) {
1520		scp = (register_t)psp->ps_sigstk.ss_sp;
1521		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
1522	} else
1523		scp = (tf->tf_sp + 63) & ~63;
1524
	hsc.sc_frame[2] = hsc.sc_args[2] = scp;

	sss = (sizeof(hsc) + 63) & ~63;
1526
1527	if (tf->tf_flags & TFF_SYS) {
1528		hsc.sc_tfflags = HPUX_TFF_SYSCALL;
1529		hsc.sc_syscall = tf->tf_t1;
1530	} else if (tf->tf_flags & TFF_INTR)
1531		hsc.sc_tfflags = HPUX_TFF_INTR;
1532	else
1533		hsc.sc_tfflags = HPUX_TFF_TRAP;
1534
1535	hsc.sc_regs[0] = tf->tf_r1;
1536	hsc.sc_regs[1] = tf->tf_rp;
1537	hsc.sc_regs[2] = tf->tf_r3;
1538	hsc.sc_regs[3] = tf->tf_r4;
1539	hsc.sc_regs[4] = tf->tf_r5;
1540	hsc.sc_regs[5] = tf->tf_r6;
1541	hsc.sc_regs[6] = tf->tf_r7;
1542	hsc.sc_regs[7] = tf->tf_r8;
1543	hsc.sc_regs[8] = tf->tf_r9;
1544	hsc.sc_regs[9] = tf->tf_r10;
1545	hsc.sc_regs[10] = tf->tf_r11;
1546	hsc.sc_regs[11] = tf->tf_r12;
1547	hsc.sc_regs[12] = tf->tf_r13;
1548	hsc.sc_regs[13] = tf->tf_r14;
1549	hsc.sc_regs[14] = tf->tf_r15;
1550	hsc.sc_regs[15] = tf->tf_r16;
1551	hsc.sc_regs[16] = tf->tf_r17;
1552	hsc.sc_regs[17] = tf->tf_r18;
1553	hsc.sc_regs[18] = tf->tf_t4;
1554	hsc.sc_regs[19] = tf->tf_t3;
1555	hsc.sc_regs[20] = tf->tf_t2;
1556	hsc.sc_regs[21] = tf->tf_t1;
1557	hsc.sc_regs[22] = tf->tf_arg3;
1558	hsc.sc_regs[23] = tf->tf_arg2;
1559	hsc.sc_regs[24] = tf->tf_arg1;
1560	hsc.sc_regs[25] = tf->tf_arg0;
1561	hsc.sc_regs[26] = tf->tf_dp;
1562	hsc.sc_regs[27] = tf->tf_ret0;
1563	hsc.sc_regs[28] = tf->tf_ret1;
1564	hsc.sc_regs[29] = tf->tf_sp;
1565	hsc.sc_regs[30] = tf->tf_r31;
1566	hsc.sc_regs[31] = tf->tf_sar;
1567	hsc.sc_regs[32] = tf->tf_iioq_head;
1568	hsc.sc_regs[33] = tf->tf_iisq_head;
1569	hsc.sc_regs[34] = tf->tf_iioq_tail;
1570	hsc.sc_regs[35] = tf->tf_iisq_tail;
1571	hsc.sc_regs[35] = tf->tf_eiem;
1572	hsc.sc_regs[36] = tf->tf_iir;
1573	hsc.sc_regs[37] = tf->tf_isr;
1574	hsc.sc_regs[38] = tf->tf_ior;
1575	hsc.sc_regs[39] = tf->tf_ipsw;
1576	hsc.sc_regs[40] = 0;
1577	hsc.sc_regs[41] = tf->tf_sr4;
1578	hsc.sc_regs[42] = tf->tf_sr0;
1579	hsc.sc_regs[43] = tf->tf_sr1;
1580	hsc.sc_regs[44] = tf->tf_sr2;
1581	hsc.sc_regs[45] = tf->tf_sr3;
1582	hsc.sc_regs[46] = tf->tf_sr5;
1583	hsc.sc_regs[47] = tf->tf_sr6;
1584	hsc.sc_regs[48] = tf->tf_sr7;
1585	hsc.sc_regs[49] = tf->tf_rctr;
1586	hsc.sc_regs[50] = tf->tf_pidr1;
1587	hsc.sc_regs[51] = tf->tf_pidr2;
1588	hsc.sc_regs[52] = tf->tf_ccr;
1589	hsc.sc_regs[53] = tf->tf_pidr3;
1590	hsc.sc_regs[54] = tf->tf_pidr4;
1591	/* hsc.sc_regs[55] = tf->tf_cr24; */
1592	hsc.sc_regs[56] = tf->tf_vtop;
1593	/* hsc.sc_regs[57] = tf->tf_cr26; */
1594	/* hsc.sc_regs[58] = tf->tf_cr27; */
1595	hsc.sc_regs[59] = 0;
1596	hsc.sc_regs[60] = 0;
1597	bcopy(p->p_addr->u_pcb.pcb_fpregs, hsc.sc_fpregs,
1598	    sizeof(hsc.sc_fpregs));
1599
1600	tf->tf_rp = (register_t)pcb->pcb_sigreturn;
1601	tf->tf_arg3 = (register_t)catcher;
1602	tf->tf_sp = scp + sss;
1603	tf->tf_ipsw &= ~(PSL_N|PSL_B);
1604	tf->tf_iioq_head = HPPA_PC_PRIV_USER | p->p_sigcode;
1605	tf->tf_iioq_tail = tf->tf_iioq_head + 4;
1606
1607	if (copyout(&hsc, (void *)scp, sizeof(hsc)))
1608		sigexit(p, SIGILL);
1609
1610#ifdef DEBUG
1611	if ((sigdebug & SDB_FOLLOW) && (!sigpid || p->p_pid == sigpid))
1612		printf("sendsig(%d): pc 0x%x rp 0x%x\n", p->p_pid,
1613		    tf->tf_iioq_head, tf->tf_rp);
1614#endif
1615}
1616#endif
1617
1618/*
1619 * machine dependent system variables.
1620 */
1621int
1622cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1623	int *name;
1624	u_int namelen;
1625	void *oldp;
1626	size_t *oldlenp;
1627	void *newp;
1628	size_t newlen;
1629	struct proc *p;
1630{
1631	extern paddr_t fpu_curpcb;	/* from locore.S */
1632	extern u_int fpu_enable;
1633	extern int cpu_fpuena;
1634	dev_t consdev;
1635	int oldval, ret;
1636
1637	/* all sysctl names at this level are terminal */
1638	if (namelen != 1)
1639		return (ENOTDIR);	/* overloaded */
1640	switch (name[0]) {
1641	case CPU_CONSDEV:
1642		if (cn_tab != NULL)
1643			consdev = cn_tab->cn_dev;
1644		else
1645			consdev = NODEV;
1646		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1647		    sizeof consdev));
1648	case CPU_FPU:
1649		if (fpu_curpcb) {
1650			mtctl(fpu_enable, CR_CCR);
1651			fpu_save(fpu_curpcb);
1652			fpu_curpcb = 0;
1653			mtctl(0, CR_CCR);
1654		}
1655		return (sysctl_int(oldp, oldlenp, newp, newlen, &cpu_fpuena));
1656	case CPU_LED_BLINK:
1657		oldval = led_blink;
1658		ret = sysctl_int(oldp, oldlenp, newp, newlen, &led_blink);
1659		/*
1660		 * If we were false and are now true, start the timer.
1661		 */
1662		if (!oldval && led_blink > oldval)
1663			blink_led_timeout(NULL);
1664		return (ret);
1665	default:
1666		return (EOPNOTSUPP);
1667	}
1668	/* NOTREACHED */
1669}
1670
1671
1672/*
1673 * consinit:
1674 * initialize the system console.
1675 */
1676void
1677consinit(void)
1678{
1679	/*
1680	 * Initial console setup has been done in pdc_init().
1681	 */
1682}
1683
1684
1685struct blink_led_softc {
1686	SLIST_HEAD(, blink_led) bls_head;
1687	int bls_on;
1688	struct timeout bls_to;
1689} blink_sc = { SLIST_HEAD_INITIALIZER(bls_head), 0 };
1690
1691void
1692blink_led_register(struct blink_led *l)
1693{
1694	if (SLIST_EMPTY(&blink_sc.bls_head)) {
1695		timeout_set(&blink_sc.bls_to, blink_led_timeout, &blink_sc);
1696		blink_sc.bls_on = 0;
1697		if (led_blink)
1698			timeout_add(&blink_sc.bls_to, 1);
1699	}
1700	SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next);
1701}
1702
1703void
1704blink_led_timeout(void *vsc)
1705{
1706	struct blink_led_softc *sc = &blink_sc;
1707	struct blink_led *l;
1708	int t;
1709
1710	if (SLIST_EMPTY(&sc->bls_head))
1711		return;
1712
1713	SLIST_FOREACH(l, &sc->bls_head, bl_next) {
1714		(*l->bl_func)(l->bl_arg, sc->bls_on);
1715	}
1716	sc->bls_on = !sc->bls_on;
1717
1718	if (!led_blink)
1719		return;
1720
1721	/*
1722	 * Blink rate is:
1723	 *      full cycle every second if completely idle (loadav = 0)
1724	 *      full cycle every 2 seconds if loadav = 1
1725	 *      full cycle every 3 seconds if loadav = 2
1726	 * etc.
1727	 */
1728	t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1));
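	/*
	 * Example: when idle (ldavg[0] == 0) the expression above is
	 * (FSCALE * hz) >> (FSHIFT + 1) == hz / 2 ticks between toggles,
	 * i.e. a one-second full cycle; at a load average of 1 it is
	 * hz ticks, a two-second cycle.
	 */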
1729	timeout_add(&sc->bls_to, t);
1730}
1731