/* machdep.c revision 122368 */
1/*-
2 * Copyright (c) 2000,2001 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/ia64/ia64/machdep.c 122368 2003-11-09 22:17:36Z marcel $
27 */
28
29#include "opt_compat.h"
30#include "opt_ddb.h"
31#include "opt_kstack_pages.h"
32#include "opt_msgbuf.h"
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/eventhandler.h>
37#include <sys/sysproto.h>
38#include <sys/signalvar.h>
39#include <sys/imgact.h>
40#include <sys/kernel.h>
41#include <sys/proc.h>
42#include <sys/lock.h>
43#include <sys/pcpu.h>
44#include <sys/malloc.h>
45#include <sys/reboot.h>
46#include <sys/bio.h>
47#include <sys/buf.h>
48#include <sys/mbuf.h>
49#include <sys/vmmeter.h>
50#include <sys/msgbuf.h>
51#include <sys/exec.h>
52#include <sys/sysctl.h>
53#include <sys/uio.h>
54#include <sys/linker.h>
55#include <sys/random.h>
56#include <sys/cons.h>
57#include <sys/uuid.h>
58#include <net/netisr.h>
59#include <vm/vm.h>
60#include <vm/vm_kern.h>
61#include <vm/vm_page.h>
62#include <vm/vm_map.h>
63#include <vm/vm_extern.h>
64#include <vm/vm_object.h>
65#include <vm/vm_pager.h>
66#include <sys/user.h>
67#include <sys/ptrace.h>
68#include <machine/clock.h>
69#include <machine/cpu.h>
70#include <machine/md_var.h>
71#include <machine/reg.h>
72#include <machine/fpu.h>
73#include <machine/mca.h>
74#include <machine/pal.h>
75#include <machine/sal.h>
76#ifdef SMP
77#include <machine/smp.h>
78#endif
79#include <machine/bootinfo.h>
80#include <machine/mutex.h>
81#include <machine/vmparam.h>
82#include <machine/elf.h>
83#include <ddb/ddb.h>
84#include <sys/vnode.h>
85#include <sys/ucontext.h>
86#include <machine/sigframe.h>
87#include <machine/efi.h>
88#include <machine/unwind.h>
89#include <i386/include/specialreg.h>
90
/* Clock rates derived from SAL/PAL; see calculate_frequencies(). */
u_int64_t processor_frequency;
u_int64_t bus_frequency;
u_int64_t itc_frequency;
int cold = 1;		/* Early-boot flag; not cleared in this file. */

/* Physical address of the bootinfo block, as passed by the loader. */
u_int64_t pa_bootinfo;
struct bootinfo bootinfo;

struct pcpu early_pcpu;
extern char kstack[];	/* proc0's kernel stack; see ia64_init(). */
struct user *proc0uarea;
vm_offset_t proc0kstack;

extern u_int64_t kernel_text[], _end[];

/* Gateway page symbols; see map_gateway_page() and sendsig(). */
extern u_int64_t ia64_gateway_page[];
extern u_int64_t break_sigtramp[];
extern u_int64_t epc_sigtramp[];

/* Floating Point Software Assist package, found via bootinfo. */
FPSWA_INTERFACE *fpswa_interface;

u_int64_t ia64_pal_base;	/* Physical base of the PAL firmware. */
u_int64_t ia64_port_base;	/* Mapped base of I/O port space. */

char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
    "The CPU model name");

static char cpu_family[64];
SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
    "The CPU family name");

#ifdef DDB
/* start and end of kernel symbol table */
void	*ksym_start, *ksym_end;
#endif

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

struct msgbuf *msgbufp=0;	/* Kernel message buffer; see ia64_init(). */

long Maxmem = 0;		/* Pages of usable physical memory. */

/* start/end address pairs of usable memory, terminated by 0 0. */
vm_offset_t phys_avail[100];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

void mi_startup(void);		/* XXX should be in a MI header */

struct kva_md_info kmi;
146
147static void
148identifycpu(void)
149{
150	char vendor[17];
151	char *family_name, *model_name;
152	u_int64_t t;
153	int number, revision, model, family, archrev;
154	u_int64_t features;
155
156	/*
157	 * Assumes little-endian.
158	 */
159	*(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
160	*(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
161	vendor[16] = '\0';
162
163	t = ia64_get_cpuid(3);
164	number = (t >> 0) & 0xff;
165	revision = (t >> 8) & 0xff;
166	model = (t >> 16) & 0xff;
167	family = (t >> 24) & 0xff;
168	archrev = (t >> 32) & 0xff;
169
170	family_name = model_name = "unknown";
171	switch (family) {
172	case 0x07:
173		family_name = "Itanium";
174		model_name = "Merced";
175		break;
176	case 0x1f:
177		family_name = "Itanium 2";
178		switch (model) {
179		case 0x00:
180			model_name = "McKinley";
181			break;
182		case 0x01:
183			model_name = "Madison";
184			break;
185		}
186		break;
187	}
188	snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
189	snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);
190
191	features = ia64_get_cpuid(4);
192
193	printf("CPU: %s (", model_name);
194	if (processor_frequency) {
195		printf("%ld.%02ld-Mhz ",
196		    (processor_frequency + 4999) / 1000000,
197		    ((processor_frequency + 4999) / 10000) % 100);
198	}
199	printf("%s)\n", family_name);
200	printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
201	printf("  Features = 0x%b\n", (u_int32_t) features,
202	    "\020"
203	    "\001LB"	/* long branch (brl) instruction. */
204	    "\002SD"	/* Spontaneous deferral. */
205	    "\003AO"	/* 16-byte atomic operations (ld, st, cmpxchg). */ );
206}
207
208static void
209cpu_startup(dummy)
210	void *dummy;
211{
212
213	/*
214	 * Good {morning,afternoon,evening,night}.
215	 */
216	identifycpu();
217
218	/* startrtclock(); */
219#ifdef PERFMON
220	perfmon_init();
221#endif
222	printf("real memory  = %ld (%ld MB)\n", ia64_ptob(Maxmem),
223	    ia64_ptob(Maxmem) / 1048576);
224
225	/*
226	 * Display any holes after the first chunk of extended memory.
227	 */
228	if (bootverbose) {
229		int indx;
230
231		printf("Physical memory chunk(s):\n");
232		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
233			int size1 = phys_avail[indx + 1] - phys_avail[indx];
234
235			printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
236			    phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
237		}
238	}
239
240	vm_ksubmap_init(&kmi);
241
242	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
243	    ptoa(cnt.v_free_count) / 1048576);
244
245	if (fpswa_interface == NULL)
246		printf("Warning: no FPSWA package supplied\n");
247	else
248		printf("FPSWA Revision = 0x%lx, Entry = %p\n",
249		    (long)fpswa_interface->Revision,
250		    (void *)fpswa_interface->Fpswa);
251
252	/*
253	 * Set up buffers, so they can be used to read disk labels.
254	 */
255	bufinit();
256	vm_pager_bufferinit();
257
258	/*
259	 * Traverse the MADT to discover IOSAPIC and Local SAPIC
260	 * information.
261	 */
262	ia64_probe_sapics();
263	ia64_mca_init();
264}
265
/*
 * Reboot the machine by handing control back to the EFI firmware
 * with a warm reset.  The howto flags are ignored on this platform.
 */
void
cpu_boot(int howto)
{

	ia64_efi_runtime->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, 0);
}
272
/*
 * Halt the machine.  There is no separate power-off primitive here;
 * like cpu_boot() this performs a warm reset through EFI.
 */
void
cpu_halt()
{

	ia64_efi_runtime->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, 0);
}
279
280static void
281cpu_idle_default(void)
282{
283	struct ia64_pal_result res;
284
285	res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
286}
287
/*
 * Idle the CPU by calling through the idle hook; this is
 * cpu_idle_default() unless another subsystem replaced it.
 */
void
cpu_idle()
{
	(*cpu_idle_hook)();
}
293
/* Called from cpu_idle(). Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = cpu_idle_default;
296
/*
 * Reset the CPU; implemented as a full reboot via cpu_boot().
 */
void
cpu_reset()
{

	cpu_boot(0);
}
303
/*
 * Switch contexts from thread `old' to thread `new'.
 *
 * savectx() returns zero after saving the outgoing context, so the
 * body of the if() runs only on the way out; the saved context later
 * resumes at the savectx() return (cf. the comment in ia64_init()).
 * restorectx() does not return here.
 */
void
cpu_switch(struct thread *old, struct thread *new)
{
	struct pcb *oldpcb, *newpcb;

	oldpcb = old->td_pcb;
#if IA32
	/* Save the outgoing thread's ia32 compatibility state. */
	ia32_savectx(oldpcb);
#endif
	if (!savectx(oldpcb)) {
		newpcb = new->td_pcb;
		/* Activate the incoming address space; remember ours. */
		oldpcb->pcb_current_pmap =
		    pmap_switch(newpcb->pcb_current_pmap);
		PCPU_SET(curthread, new);
#if IA32
		ia32_restorectx(newpcb);
#endif
		restorectx(newpcb);
		/* We should not get here. */
		panic("cpu_switch: restorectx() returned");
		/* NOTREACHED */
	}
}
327
/*
 * Switch to thread `new' without saving the outgoing context (`old'
 * is unused).  Mirrors the restore half of cpu_switch().
 */
void
cpu_throw(struct thread *old __unused, struct thread *new)
{
	struct pcb *newpcb;

	newpcb = new->td_pcb;
	/* Activate the incoming thread's address space. */
	(void)pmap_switch(newpcb->pcb_current_pmap);
	PCPU_SET(curthread, new);
#if IA32
	ia32_restorectx(newpcb);
#endif
	restorectx(newpcb);
	/* We should not get here. */
	panic("cpu_throw: restorectx() returned");
	/* NOTREACHED */
}
344
345void
346cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
347{
348	size_t pcpusz;
349
350	/*
351	 * Make sure the PCB is 16-byte aligned by making the PCPU
352	 * a multiple of 16 bytes. We assume the PCPU is 16-byte
353	 * aligned itself.
354	 */
355	pcpusz = (sizeof(struct pcpu) + 15) & ~15;
356	KASSERT(size >= pcpusz + sizeof(struct pcb),
357	    ("%s: too small an allocation for pcpu", __func__));
358	pcpu->pc_pcb = (struct pcb *)((char*)pcpu + pcpusz);
359}
360
/*
 * Pin the PAL firmware region into the TLB by inserting matching
 * data and instruction translation registers (slot 1).  The insert
 * sequence must run with PSR.ic and interrupts disabled.
 */
void
map_pal_code(void)
{
	struct ia64_pte pte;
	u_int64_t psr;

	if (ia64_pal_base == 0)
		return;

	/* Present, kernel-only, write-back, RWX mapping of the PAL pages. */
	bzero(&pte, sizeof(pte));
	pte.pte_p = 1;
	pte.pte_ma = PTE_MA_WB;
	pte.pte_a = 1;
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;
	pte.pte_ar = PTE_AR_RWX;
	pte.pte_ppn = ia64_pal_base >> 12;

	/* Purge any existing translations for the target address first. */
	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2));

	__asm __volatile("mov	%0=psr" : "=r"(psr));
	__asm __volatile("rsm	psr.ic|psr.i");
	__asm __volatile("srlz.i");
	__asm __volatile("mov	cr.ifa=%0" ::
	    "r"(IA64_PHYS_TO_RR7(ia64_pal_base)));
	__asm __volatile("mov	cr.itir=%0" :: "r"(IA64_ID_PAGE_SHIFT << 2));
	__asm __volatile("itr.d	dtr[%0]=%1" :: "r"(1), "r"(*(u_int64_t*)&pte));
	__asm __volatile("srlz.d");		/* XXX not needed. */
	__asm __volatile("itr.i	itr[%0]=%1" :: "r"(1), "r"(*(u_int64_t*)&pte));
	__asm __volatile("mov	psr.l=%0" :: "r" (psr));
	__asm __volatile("srlz.i");
}
394
/*
 * Pin the memory-mapped I/O port space with a data translation
 * register (slot 2), uncacheable.  Must run with PSR.ic and
 * interrupts disabled during the insert.
 */
void
map_port_space(void)
{
	struct ia64_pte pte;
	u_int64_t psr;

	/* XXX we should fail hard if there's no I/O port space. */
	if (ia64_port_base == 0)
		return;

	/* Present, kernel-only, uncacheable, RW mapping. */
	bzero(&pte, sizeof(pte));
	pte.pte_p = 1;
	pte.pte_ma = PTE_MA_UC;
	pte.pte_a = 1;
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;
	pte.pte_ar = PTE_AR_RW;
	pte.pte_ppn = ia64_port_base >> 12;

	/* Purge any existing translation first (16MB page size). */
	__asm __volatile("ptr.d %0,%1" :: "r"(ia64_port_base), "r"(24 << 2));

	__asm __volatile("mov	%0=psr" : "=r" (psr));
	__asm __volatile("rsm	psr.ic|psr.i");
	__asm __volatile("srlz.d");
	__asm __volatile("mov	cr.ifa=%0" :: "r"(ia64_port_base));
	__asm __volatile("mov	cr.itir=%0" :: "r"(IA64_ID_PAGE_SHIFT << 2));
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(2), "r"(*(u_int64_t*)&pte));
	__asm __volatile("mov	psr.l=%0" :: "r" (psr));
	__asm __volatile("srlz.d");
}
425
/*
 * Map the gateway page (syscall/signal trampolines) at VM_MAX_ADDRESS
 * with instruction and data translation registers (slot 3), and
 * publish its address to userland through ar.k5.
 */
void
map_gateway_page(void)
{
	struct ia64_pte pte;
	u_int64_t psr;

	/* Present, kernel, write-back; execute for all, read for kernel. */
	bzero(&pte, sizeof(pte));
	pte.pte_p = 1;
	pte.pte_ma = PTE_MA_WB;
	pte.pte_a = 1;
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;
	pte.pte_ar = PTE_AR_X_RX;
	pte.pte_ppn = IA64_RR_MASK((u_int64_t)ia64_gateway_page) >> 12;

	/* Purge any existing translations for the target address first. */
	__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
	    "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));

	__asm __volatile("mov	%0=psr" : "=r"(psr));
	__asm __volatile("rsm	psr.ic|psr.i");
	__asm __volatile("srlz.i");
	__asm __volatile("mov	cr.ifa=%0" :: "r"(VM_MAX_ADDRESS));
	__asm __volatile("mov	cr.itir=%0" :: "r"(PAGE_SHIFT << 2));
	__asm __volatile("itr.d	dtr[%0]=%1" :: "r"(3), "r"(*(u_int64_t*)&pte));
	__asm __volatile("srlz.d");		/* XXX not needed. */
	__asm __volatile("itr.i	itr[%0]=%1" :: "r"(3), "r"(*(u_int64_t*)&pte));
	__asm __volatile("mov	psr.l=%0" :: "r" (psr));
	__asm __volatile("srlz.i");

	/* Expose the mapping to userland in ar.k5 */
	ia64_set_k5(VM_MAX_ADDRESS);
}
458
/*
 * Derive the processor, bus and ITC frequencies from the SAL base
 * clock frequency and the PAL-provided numerator/denominator ratios.
 * Leaves the globals untouched if either firmware call fails.
 */
static void
calculate_frequencies(void)
{
	struct ia64_sal_result sal;
	struct ia64_pal_result pal;

	sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
	pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);

	if (sal.sal_status == 0 && pal.pal_status == 0) {
		if (bootverbose) {
			printf("Platform clock frequency %ld Hz\n",
			       sal.sal_result[0]);
			printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
			       "ITC ratio %ld/%ld\n",
			       pal.pal_result[0] >> 32,
			       pal.pal_result[0] & ((1L << 32) - 1),
			       pal.pal_result[1] >> 32,
			       pal.pal_result[1] & ((1L << 32) - 1),
			       pal.pal_result[2] >> 32,
			       pal.pal_result[2] & ((1L << 32) - 1));
		}
		/* Each ratio is packed as numerator<<32 | denominator. */
		processor_frequency =
			sal.sal_result[0] * (pal.pal_result[0] >> 32)
			/ (pal.pal_result[0] & ((1L << 32) - 1));
		bus_frequency =
			sal.sal_result[0] * (pal.pal_result[1] >> 32)
			/ (pal.pal_result[1] & ((1L << 32) - 1));
		itc_frequency =
			sal.sal_result[0] * (pal.pal_result[2] >> 32)
			/ (pal.pal_result[2] & ((1L << 32) - 1));
	}
}
492
/*
 * Machine-dependent kernel entry point, called from locore.s with the
 * physical address of the bootinfo block in pa_bootinfo.  Sets up the
 * console, firmware interfaces, the physical memory map, proc0/thread0
 * and the VM system, then saves a context and calls mi_startup().
 * This function does not return (except into the saved context).
 */
void
ia64_init(void)
{
	int phys_avail_cnt;
	vm_offset_t kernstart, kernend;
	vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
	char *p;
	EFI_MEMORY_DESCRIPTOR *md, *mdp;
	int mdcount, i, metadata_missing;

	/* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

	/*
	 * TODO: Disable interrupts, floating point etc.
	 * Maybe flush cache and tlb
	 */
	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	/*
	 * TODO: Get critical system information (if possible, from the
	 * information provided by the boot program).
	 */

	/*
	 * pa_bootinfo is the physical address of the bootinfo block as
	 * passed to us by the loader and set in locore.s.
	 */
	bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));

	/* Fall back to a zeroed bootinfo if the block looks invalid. */
	if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
		bzero(&bootinfo, sizeof(bootinfo));
		bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
	}

	/*
	 * Look for the I/O ports first - we need them for console
	 * probing.
	 */
	mdcount = bootinfo.bi_memmap_size / bootinfo.bi_memdesc_size;
	md = (EFI_MEMORY_DESCRIPTOR *) IA64_PHYS_TO_RR7(bootinfo.bi_memmap);

	for (i = 0, mdp = md; i < mdcount; i++,
	    mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) {
		if (mdp->Type == EfiMemoryMappedIOPortSpace)
			ia64_port_base = IA64_PHYS_TO_RR6(mdp->PhysicalStart);
		else if (mdp->Type == EfiPalCode)
			ia64_pal_base = mdp->PhysicalStart;
	}

	map_port_space();

	/* Pick up the module metadata and environment left by the loader. */
	metadata_missing = 0;
	if (bootinfo.bi_modulep)
		preload_metadata = (caddr_t)bootinfo.bi_modulep;
	else
		metadata_missing = 1;
	if (envmode == 1)
		kern_envp = static_env;
	else
		kern_envp = (caddr_t)bootinfo.bi_envp;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = bootinfo.bi_boothowto;

	/*
	 * Catch case of boot_verbose set in environment.
	 */
	if ((p = getenv("boot_verbose")) != NULL) {
		if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
			boothowto |= RB_VERBOSE;
		}
		freeenv(p);
	}

	if (boothowto & RB_VERBOSE)
		bootverbose = 1;

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	/* OUTPUT NOW ALLOWED */

	if (ia64_pal_base != 0) {
		ia64_pal_base &= ~IA64_ID_PAGE_MASK;
		/*
		 * We use a TR to map the first 256M of memory - this might
		 * cover the palcode too.
		 */
		if (ia64_pal_base == 0)
			printf("PAL code mapped by the kernel's TR\n");
	} else
		printf("PAL code not found\n");

	/*
	 * Wire things up so we can call the firmware.
	 */
	map_pal_code();
	ia64_efi_init();
	calculate_frequencies();

	/*
	 * Find the beginning and end of the kernel.
	 */
	kernstart = trunc_page(kernel_text);
#ifdef DDB
	ksym_start = (void *)bootinfo.bi_symtab;
	ksym_end = (void *)bootinfo.bi_esymtab;
	kernend = (vm_offset_t)round_page(ksym_end);
#else
	kernend = (vm_offset_t)round_page(_end);
#endif

	/* But if the bootstrap tells us otherwise, believe it! */
	if (bootinfo.bi_kernend)
		kernend = round_page(bootinfo.bi_kernend);
	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

	/* Get FPSWA interface */
	fpswa_interface = (FPSWA_INTERFACE*)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);

	/* Init basic tunables, including hz */
	init_param1();

	p = getenv("kernelname");
	if (p) {
		strncpy(kernelname, p, sizeof(kernelname) - 1);
		freeenv(p);
	}

	kernstartpfn = atop(IA64_RR_MASK(kernstart));
	kernendpfn = atop(IA64_RR_MASK(kernend));

	/*
	 * Size the memory regions and load phys_avail[] with the results.
	 */

	/*
	 * Find out how much memory is available, by looking at
	 * the memory descriptors.
	 */

#ifdef DEBUG_MD
	printf("Memory descriptor count: %d\n", mdcount);
#endif

	phys_avail_cnt = 0;
	for (i = 0, mdp = md; i < mdcount; i++,
		 mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) {
#ifdef DEBUG_MD
		printf("MD %d: type %d pa 0x%lx cnt 0x%lx\n", i,
		       mdp->Type,
		       mdp->PhysicalStart,
		       mdp->NumberOfPages);
#endif

		pfn0 = ia64_btop(round_page(mdp->PhysicalStart));
		pfn1 = ia64_btop(trunc_page(mdp->PhysicalStart
					    + mdp->NumberOfPages * 4096));
		if (pfn1 <= pfn0)
			continue;

		if (mdp->Type != EfiConventionalMemory)
			continue;

		/*
		 * Wimp out for now since we do not DTRT here with
		 * pci bus mastering (no bounce buffering, for example).
		 */
		if (pfn0 >= ia64_btop(0x100000000UL)) {
			printf("Skipping memory chunk start 0x%lx\n",
			    mdp->PhysicalStart);
			continue;
		}
		if (pfn1 >= ia64_btop(0x100000000UL)) {
			printf("Skipping memory chunk end 0x%lx\n",
			    mdp->PhysicalStart + mdp->NumberOfPages * 4096);
			continue;
		}

		/*
		 * We have a memory descriptor that describes conventional
		 * memory that is for general use. We must determine if the
		 * loader has put the kernel in this region.
		 */
		physmem += (pfn1 - pfn0);
		if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
			/*
			 * Must compute the location of the kernel
			 * within the segment.
			 */
#ifdef DEBUG_MD
			printf("Descriptor %d contains kernel\n", i);
#endif
			if (pfn0 < kernstartpfn) {
				/*
				 * There is a chunk before the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk before kernel: "
				       "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
				phys_avail_cnt += 2;
			}
			if (kernendpfn < pfn1) {
				/*
				 * There is a chunk after the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk after kernel: "
				       "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
				phys_avail_cnt += 2;
			}
		} else {
			/*
			 * Just load this cluster as one chunk.
			 */
#ifdef DEBUG_MD
			printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
			       pfn0, pfn1);
#endif
			phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
			phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
			phys_avail_cnt += 2;

		}
	}
	/* 0 0 terminates the list; see PHYS_AVAIL_ARRAY_END. */
	phys_avail[phys_avail_cnt] = 0;

	Maxmem = physmem;
	init_param2(physmem);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
	msgbufinit(msgbufp, MSGBUF_SIZE);

	proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
	/*
	 * Init mapping for u page(s) for proc 0
	 */
	proc0uarea = (struct user *)pmap_steal_memory(UAREA_PAGES * PAGE_SIZE);
	proc0kstack = (vm_offset_t)kstack;
	proc0.p_uarea = proc0uarea;
	thread0.td_kstack = proc0kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	/*
	 * Setup the global data for the bootstrap cpu.
	 */
	pcpup = (struct pcpu *)pmap_steal_memory(PAGE_SIZE);
	ia64_set_k4((u_int64_t)pcpup);
	pcpu_init(pcpup, 0, PAGE_SIZE);
	PCPU_SET(curthread, &thread0);

	/*
	 * Initialize the rest of proc 0's PCB.
	 *
	 * Set the kernel sp, reserving space for an (empty) trapframe,
	 * and make proc0's trapframe pointer point to it for sanity.
	 * Initialise proc0's backing store to start after u area.
	 */
	thread0.td_frame = (struct trapframe *)thread0.td_pcb - 1;
	thread0.td_frame->tf_length = sizeof(struct trapframe);
	thread0.td_frame->tf_flags = FRAME_SYSCALL;
	thread0.td_pcb->pcb_special.sp =
	    (u_int64_t)thread0.td_frame - 16;
	thread0.td_pcb->pcb_special.bspstore = (u_int64_t)proc0kstack;

	mutex_init();

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB) {
		printf("Boot flags requested debugger\n");
		breakpoint();
	}
#endif
	ia64_set_tpr(0);

	/*
	 * Save our current context so that we have a known (maybe even
	 * sane) context as the initial context for new threads that are
	 * forked from us. If any of those threads (including thread0)
	 * does something wrong, we may be lucky and return here where
	 * we're ready for them with a nice panic.
	 */
	if (!savectx(thread0.td_pcb))
		mi_startup();

	/* We should not get here. */
	panic("ia64_init: Whooaa there!");
	/* NOTREACHED */
}
805
806void
807bzero(void *buf, size_t len)
808{
809	caddr_t p = buf;
810
811	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
812		*p++ = 0;
813		len--;
814	}
815	while (len >= sizeof(u_long) * 8) {
816		*(u_long*) p = 0;
817		*((u_long*) p + 1) = 0;
818		*((u_long*) p + 2) = 0;
819		*((u_long*) p + 3) = 0;
820		len -= sizeof(u_long) * 8;
821		*((u_long*) p + 4) = 0;
822		*((u_long*) p + 5) = 0;
823		*((u_long*) p + 6) = 0;
824		*((u_long*) p + 7) = 0;
825		p += sizeof(u_long) * 8;
826	}
827	while (len >= sizeof(u_long)) {
828		*(u_long*) p = 0;
829		len -= sizeof(u_long);
830		p += sizeof(u_long);
831	}
832	while (len) {
833		*p++ = 0;
834		len--;
835	}
836}
837
/*
 * Busy-wait for at least n microseconds using the interval time
 * counter (ITC).  The second half of the loop condition keeps the
 * wait correct when the computed 64-bit deadline wraps past zero.
 */
void
DELAY(int n)
{
	u_int64_t start, end, now;

	start = ia64_get_itc();
	end = start + (itc_frequency * n) / 1000000;
	/* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
	do {
		now = ia64_get_itc();
	} while (now < end || (now > start && end < start));
}
850
/*
 * Send an interrupt (signal) to a process.
 *
 * Builds a sigframe (signal mask, stack state, machine context and,
 * for POSIX handlers, a siginfo), copies it to the user stack (or the
 * alternate signal stack), and rewrites the trapframe so the thread
 * resumes in the gateway-page signal trampoline.  Called with the
 * proc lock and ps_mtx held; both are dropped around the copyout and
 * reacquired before return.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *tf;
	struct sigacts *psp;
	struct sigframe sf, *sfp;
	u_int64_t sbs, sp;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	sp = tf->tf_special.sp;
	oonstack = sigonstack(sp);
	sbs = 0;

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Use the alternate stack; 16-byte align its base. */
		sbs = (u_int64_t)p->p_sigstk.ss_sp;
		sbs = (sbs + 15) & ~15;
		sfp = (struct sigframe *)(sbs + p->p_sigstk.ss_size);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)sp;
	/* Make room for the sigframe and 16-byte align it. */
	sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);

	/* Fill in the siginfo structure for POSIX handlers. */
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
		/* Pass the siginfo pointer instead of the raw code. */
		code = (u_int64_t)&sfp->sf_si;
	}

	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	get_mcontext(td, &sf.sf_uc.uc_mcontext, GET_MC_IA64_SCRATCH);

	/* Copy the frame out to userland. */
	if (copyout(&sf, sfp, sizeof(sf)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		return;
	}

	/* Pick the trampoline entry matching how we entered the kernel. */
	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
		tf->tf_special.psr &= ~IA64_PSR_RI;
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
	} else
		tf->tf_special.iip = ia64_get_k5() +
		    ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);

	/*
	 * Setup the trapframe to return to the signal trampoline. We pass
	 * information to the trampoline in the following registers:
	 *
	 *	gp	new backing store or NULL
	 *	r8	signal number
	 *	r9	signal code or siginfo pointer
	 *	r10	signal handler (function descriptor)
	 */
	tf->tf_special.sp = (u_int64_t)sfp - 16;
	tf->tf_special.gp = sbs;
	tf->tf_scratch.gr8 = sig;
	tf->tf_scratch.gr9 = code;
	tf->tf_scratch.gr10 = (u_int64_t)catcher;

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
951
952/*
953 * Build siginfo_t for SA thread
954 */
955void
956cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
957{
958	struct proc *p;
959	struct thread *td;
960
961	td = curthread;
962	p = td->td_proc;
963	PROC_LOCK_ASSERT(p, MA_OWNED);
964
965	bzero(si, sizeof(*si));
966	si->si_signo = sig;
967	si->si_code = code;
968	/* XXXKSE fill other fields */
969}
970
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sigreturn(struct thread *td,
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap)
{
	ucontext_t uc;
	struct trapframe *tf;
	struct proc *p;
	struct pcb *pcb;

	tf = td->td_frame;
	p = td->td_proc;
	pcb = td->td_pcb;

	/*
	 * Fetch the entire context structure at once for speed.
	 * We don't use a normal argument to simplify RSE handling.
	 */
	if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
		return (EFAULT);

	/* Restore the machine state; set_mcontext() sanitizes the PSR. */
	set_mcontext(td, &uc.uc_mcontext);

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (sigonstack(tf->tf_special.sp))
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	/* Restore the signal mask, never allowing unmaskable signals. */
	td->td_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	return (EJUSTRETURN);
}
1020
#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4.x compatibility entry point; forwards directly to the
 * current sigreturn().  NOTE(review): this assumes the 4.x argument
 * structure is layout-compatible on ia64 — confirm against the
 * compat headers.
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif
1029
/*
 * Capture the thread's machine context into *mc.
 *
 * Any dirty stacked registers still held by the RSE are first flushed
 * from the kernel stack out to the user backing store, so that the
 * context is self-contained.  The flags argument selects how much of
 * the scratch state is included (see GET_MC_* in the MD headers).
 * Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	uint64_t bspst, kstk, rnat;

	tf = td->td_frame;
	bzero(mc, sizeof(*mc));
	if (tf->tf_special.ndirty != 0) {
		/* Kernel-stack address of the first dirty register. */
		kstk = td->td_kstack + (tf->tf_special.bspstore & 0x1ffUL);
		/* Stop RSE activity while we inspect ar.bspstore. */
		__asm __volatile("mov	ar.rsc=0;;");
		__asm __volatile("mov	%0=ar.bspstore" : "=r"(bspst));
		/* Make sure we have all the user registers written out. */
		if (bspst - kstk < tf->tf_special.ndirty) {
			__asm __volatile("flushrs;;");
			__asm __volatile("mov	%0=ar.bspstore" : "=r"(bspst));
		}
		__asm __volatile("mov	%0=ar.rnat;;" : "=r"(rnat));
		__asm __volatile("mov	ar.rsc=3");
		/* Push the dirty registers to the user backing store. */
		copyout((void*)kstk, (void*)tf->tf_special.bspstore,
		    tf->tf_special.ndirty);
		kstk += tf->tf_special.ndirty;
		tf->tf_special.bspstore += tf->tf_special.ndirty;
		tf->tf_special.ndirty = 0;
		/*
		 * Pick up the NaT collection: from the kernel stack if a
		 * collection point was crossed during the flush, otherwise
		 * from ar.rnat as read above.
		 */
		tf->tf_special.rnat =
		    (bspst > kstk && (bspst & 0x1ffUL) < (kstk & 0x1ffUL))
		    ? *(uint64_t*)(kstk | 0x1f8UL) : rnat;
	}
	if (tf->tf_flags & FRAME_SYSCALL) {
		if (flags & GET_MC_IA64_SCRATCH) {
			mc->mc_flags |= _MC_FLAGS_SCRATCH_VALID;
			mc->mc_scratch = tf->tf_scratch;
		} else {
			/*
			 * Put the syscall return values in the context.  We
			 * need this for swapcontext() to work.  Note that we
			 * don't use gr11 in the kernel, but the runtime
			 * specification defines it as a return register,
			 * just like gr8-gr10.
			 */
			mc->mc_flags |= _MC_FLAGS_RETURN_VALID;
			if ((flags & GET_MC_CLEAR_RET) == 0) {
				mc->mc_scratch.gr8 = tf->tf_scratch.gr8;
				mc->mc_scratch.gr9 = tf->tf_scratch.gr9;
				mc->mc_scratch.gr10 = tf->tf_scratch.gr10;
				mc->mc_scratch.gr11 = tf->tf_scratch.gr11;
			}
		}
	} else {
		mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		mc->mc_scratch_fp = tf->tf_scratch_fp;
		/* XXX High FP */
	}
	mc->mc_special = tf->tf_special;
	save_callee_saved(&mc->mc_preserved);
	save_callee_saved_fp(&mc->mc_preserved_fp);
	return (0);
}
1089
/*
 * Install the machine context *mc into the thread's trapframe and
 * preserved-register state, sanitizing privileged PSR bits so a user
 * supplied context cannot elevate privileges.  Always returns 0.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
	struct _special s;
	struct trapframe *tf;
	uint64_t psrmask;

	tf = td->td_frame;

	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	s = mc->mc_special;
	/*
	 * Only copy the user mask and the restart instruction bit from
	 * the new context.
	 */
	psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
	    IA64_PSR_MFH | IA64_PSR_RI;
	s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
	/* We don't have any dirty registers of the new context. */
	s.ndirty = 0;
	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
		/* Async contexts only restore onto interrupt frames. */
		KASSERT((tf->tf_flags & FRAME_SYSCALL) == 0, ("foo"));
		tf->tf_scratch = mc->mc_scratch;
		tf->tf_scratch_fp = mc->mc_scratch_fp;
		/* XXX High FP */
	} else {
		/* Synchronous contexts only restore onto syscall frames. */
		KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
		if ((mc->mc_flags & _MC_FLAGS_SCRATCH_VALID) == 0) {
			/* Keep the current frame marker and resume IP. */
			s.cfm = tf->tf_special.cfm;
			s.iip = tf->tf_special.iip;
			tf->tf_scratch.gr15 = 0;	/* Clear syscall nr. */
			if (mc->mc_flags & _MC_FLAGS_RETURN_VALID) {
				tf->tf_scratch.gr8 = mc->mc_scratch.gr8;
				tf->tf_scratch.gr9 = mc->mc_scratch.gr9;
				tf->tf_scratch.gr10 = mc->mc_scratch.gr10;
				tf->tf_scratch.gr11 = mc->mc_scratch.gr11;
			}
		} else
			tf->tf_scratch = mc->mc_scratch;
	}
	tf->tf_special = s;
	restore_callee_saved(&mc->mc_preserved);
	restore_callee_saved_fp(&mc->mc_preserved_fp);

	/* KSE upcall mailbox write requested by the context. */
	if (mc->mc_flags & _MC_FLAGS_KSE_SET_MBOX)
		suword((caddr_t)mc->mc_special.ifa, mc->mc_special.isr);

	return (0);
}
1141
1142/*
1143 * Clear registers on exec.
1144 */
1145void
1146exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
1147{
1148	struct trapframe *tf;
1149	uint64_t *ksttop, *kst;
1150
1151	tf = td->td_frame;
1152	ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
1153	    (tf->tf_special.bspstore & 0x1ffUL));
1154
1155	/*
1156	 * We can ignore up to 8KB of dirty registers by masking off the
1157	 * lower 13 bits in exception_restore() or epc_syscall(). This
1158	 * should be enough for a couple of years, but if there are more
1159	 * than 8KB of dirty registers, we lose track of the bottom of
1160	 * the kernel stack. The solution is to copy the active part of
1161	 * the kernel stack down 1 page (or 2, but not more than that)
1162	 * so that we always have less than 8KB of dirty registers.
1163	 */
1164	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
1165	    ("Whoa there! We have more than 8KB of dirty registers!"));
1166
1167	bzero(&tf->tf_special, sizeof(tf->tf_special));
1168	if ((tf->tf_flags & FRAME_SYSCALL) == 0) {	/* break syscalls. */
1169		bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
1170		bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
1171		tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
1172		tf->tf_special.bspstore = IA64_BACKINGSTORE;
1173		/*
1174		 * Copy the arguments onto the kernel register stack so that
1175		 * they get loaded by the loadrs instruction. Skip over the
1176		 * NaT collection points.
1177		 */
1178		kst = ksttop - 1;
1179		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
1180			*kst-- = 0;
1181		*kst-- = 0;
1182		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
1183			*kst-- = 0;
1184		*kst-- = ps_strings;
1185		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
1186			*kst-- = 0;
1187		*kst = stack;
1188		tf->tf_special.ndirty = (ksttop - kst) << 3;
1189	} else {				/* epc syscalls (default). */
1190		tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
1191		tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
1192		/*
1193		 * Write values for out0, out1 and out2 to the user's backing
1194		 * store and arrange for them to be restored into the user's
1195		 * initial register frame.
1196		 * Assumes that (bspstore & 0x1f8) < 0x1e0.
1197		 */
1198		suword((caddr_t)tf->tf_special.bspstore - 24, stack);
1199		suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
1200		suword((caddr_t)tf->tf_special.bspstore -  8, 0);
1201	}
1202
1203	tf->tf_special.iip = entry;
1204	tf->tf_special.sp = (stack & ~15) - 16;
1205	tf->tf_special.rsc = 0xf;
1206	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
1207	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
1208	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
1209	    IA64_PSR_CPL_USER;
1210}
1211
1212int
1213ptrace_set_pc(struct thread *td, unsigned long addr)
1214{
1215	uint64_t slot;
1216
1217	switch (addr & 0xFUL) {
1218	case 0:
1219		slot = IA64_PSR_RI_0;
1220		break;
1221	case 1:
1222		/* XXX we need to deal with MLX bundles here */
1223		slot = IA64_PSR_RI_1;
1224		break;
1225	case 2:
1226		slot = IA64_PSR_RI_2;
1227		break;
1228	default:
1229		return (EINVAL);
1230	}
1231
1232	td->td_frame->tf_special.iip = addr & ~0x0FULL;
1233	td->td_frame->tf_special.psr =
1234	    (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
1235	return (0);
1236}
1237
1238int
1239ptrace_single_step(struct thread *td)
1240{
1241
1242	td->td_frame->tf_special.psr |= IA64_PSR_SS;
1243	return (0);
1244}
1245
1246int
1247fill_regs(struct thread *td, struct reg *regs)
1248{
1249	struct trapframe *tf;
1250
1251	tf = td->td_frame;
1252	regs->r_special = tf->tf_special;
1253	regs->r_scratch = tf->tf_scratch;
1254	save_callee_saved(&regs->r_preserved);
1255	return (0);
1256}
1257
1258int
1259set_regs(struct thread *td, struct reg *regs)
1260{
1261	struct trapframe *tf;
1262
1263	tf = td->td_frame;
1264	tf->tf_special = regs->r_special;
1265	tf->tf_scratch = regs->r_scratch;
1266	restore_callee_saved(&regs->r_preserved);
1267	return (0);
1268}
1269
/*
 * Debug register read-out is not implemented on ia64.
 */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
1276
/*
 * Debug register write-back is not implemented on ia64.
 */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
1283
1284int
1285fill_fpregs(struct thread *td, struct fpreg *fpregs)
1286{
1287	struct trapframe *frame = td->td_frame;
1288	struct pcb *pcb = td->td_pcb;
1289
1290	/* Save the high FP registers. */
1291	ia64_highfp_save(td);
1292
1293	fpregs->fpr_scratch = frame->tf_scratch_fp;
1294	save_callee_saved_fp(&fpregs->fpr_preserved);
1295	fpregs->fpr_high = pcb->pcb_high_fp;
1296	return (0);
1297}
1298
1299int
1300set_fpregs(struct thread *td, struct fpreg *fpregs)
1301{
1302	struct trapframe *frame = td->td_frame;
1303	struct pcb *pcb = td->td_pcb;
1304
1305	/* Throw away the high FP registers (should be redundant). */
1306	ia64_highfp_drop(td);
1307
1308	frame->tf_scratch_fp = fpregs->fpr_scratch;
1309	restore_callee_saved_fp(&fpregs->fpr_preserved);
1310	pcb->pcb_high_fp = fpregs->fpr_high;
1311	return (0);
1312}
1313
1314/*
1315 * High FP register functions.
1316 * XXX no synchronization yet.
1317 */
1318
1319int
1320ia64_highfp_drop(struct thread *td)
1321{
1322	struct pcb *pcb;
1323	struct pcpu *cpu;
1324	struct thread *thr;
1325
1326	pcb = td->td_pcb;
1327	cpu = pcb->pcb_fpcpu;
1328	if (cpu == NULL)
1329		return (0);
1330	pcb->pcb_fpcpu = NULL;
1331	thr = cpu->pc_fpcurthread;
1332	cpu->pc_fpcurthread = NULL;
1333
1334	/* Post-mortem sanity checking. */
1335	KASSERT(thr == td, ("Inconsistent high FP state"));
1336	return (1);
1337}
1338
/*
 * Load the thread's high FP register contents from its PCB onto the
 * current CPU and record ownership (pcb_fpcpu / pc_fpcurthread).
 * The thread and the CPU must both be free of prior high FP
 * ownership.  Returns 1.
 */
int
ia64_highfp_load(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(pcb->pcb_fpcpu == NULL, ("FP race on thread"));
	KASSERT(PCPU_GET(fpcurthread) == NULL, ("FP race on pcpu"));
	restore_high_fp(&pcb->pcb_high_fp);
	/* Publish ownership only after the registers are in place. */
	PCPU_SET(fpcurthread, td);
	pcb->pcb_fpcpu = pcpup;
	return (1);
}
1352
1353int
1354ia64_highfp_save(struct thread *td)
1355{
1356	struct pcb *pcb;
1357	struct pcpu *cpu;
1358	struct thread *thr;
1359
1360	/* Don't save if the high FP registers weren't modified. */
1361	if ((td->td_frame->tf_special.psr & IA64_PSR_MFH) == 0)
1362		return (ia64_highfp_drop(td));
1363
1364	pcb = td->td_pcb;
1365	cpu = pcb->pcb_fpcpu;
1366	if (cpu == NULL)
1367		return (0);
1368#ifdef SMP
1369	if (cpu != pcpup) {
1370		ipi_send(cpu->pc_lid, IPI_HIGH_FP);
1371		while (pcb->pcb_fpcpu != cpu)
1372			DELAY(100);
1373		return (1);
1374	}
1375#endif
1376	save_high_fp(&pcb->pcb_high_fp);
1377	pcb->pcb_fpcpu = NULL;
1378	thr = cpu->pc_fpcurthread;
1379	cpu->pc_fpcurthread = NULL;
1380
1381	/* Post-mortem sanity cxhecking. */
1382	KASSERT(thr == td, ("Inconsistent high FP state"));
1383	return (1);
1384}
1385
#ifndef DDB
/*
 * Stub used when the kernel is built without the DDB debugger: just
 * announce that the debugger hook was hit and keep running.
 */
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */
1393
/*
 * No speaker hardware on this platform; beep requests always fail
 * with ENODEV.
 */
int
sysbeep(int pitch, int period)
{
	return (ENODEV);
}
1399