/* machdep.c revision 82785 */
1/*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/ia64/ia64/machdep.c 82785 2001-09-02 07:47:47Z peter $
27 */
28
29#include "opt_compat.h"
30#include "opt_ddb.h"
31#include "opt_simos.h"
32#include "opt_msgbuf.h"
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/eventhandler.h>
37#include <sys/sysproto.h>
38#include <sys/signalvar.h>
39#include <sys/kernel.h>
40#include <sys/proc.h>
41#include <sys/lock.h>
42#include <sys/pcpu.h>
43#include <sys/malloc.h>
44#include <sys/reboot.h>
45#include <sys/bio.h>
46#include <sys/buf.h>
47#include <sys/mbuf.h>
48#include <sys/vmmeter.h>
49#include <sys/msgbuf.h>
50#include <sys/exec.h>
51#include <sys/sysctl.h>
52#include <sys/uio.h>
53#include <sys/linker.h>
54#include <sys/random.h>
55#include <net/netisr.h>
56#include <vm/vm.h>
57#include <vm/vm_kern.h>
58#include <vm/vm_page.h>
59#include <vm/vm_map.h>
60#include <vm/vm_extern.h>
61#include <vm/vm_object.h>
62#include <vm/vm_pager.h>
63#include <sys/user.h>
64#include <sys/ptrace.h>
65#include <machine/clock.h>
66#include <machine/md_var.h>
67#include <machine/reg.h>
68#include <machine/fpu.h>
69#include <machine/pal.h>
70#include <machine/efi.h>
71#include <machine/bootinfo.h>
72#include <machine/mutex.h>
73#include <machine/vmparam.h>
74#include <machine/elf.h>
75#include <ddb/ddb.h>
76#include <alpha/alpha/db_instruction.h>
77#include <sys/vnode.h>
78#include <fs/procfs/procfs.h>
79#include <machine/sigframe.h>
80
/* CPU clock calibration results, filled in by clock initialization. */
u_int64_t cycles_per_usec;
u_int32_t cycles_per_sec;
int cold = 1;		/* nonzero while booting, before scheduling starts */
struct bootinfo_kernel bootinfo;	/* copy of the loader-supplied bootinfo */

/* Core kernel mutexes; initialized in ia64_init(). */
struct mtx sched_lock;
struct mtx Giant;

/* U-area (user struct + kernel stack) of process 0. */
struct	user *proc0paddr;

char machine[] = "ia64";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

/* CPU model string; currently never filled in (see identifycpu()). */
static char cpu_model[128];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0, "");

#ifdef DDB
/* start and end of kernel symbol table */
void	*ksym_start, *ksym_end;
#endif

/* Tunable policy for handling unaligned memory accesses from userland. */
int	ia64_unaligned_print = 1;	/* warn about unaligned accesses */
int	ia64_unaligned_fix = 1;	/* fix up unaligned accesses */
int	ia64_unaligned_sigbus = 0;	/* don't SIGBUS on fixed-up accesses */

SYSCTL_INT(_machdep, CPU_UNALIGNED_PRINT, unaligned_print,
	CTLFLAG_RW, &ia64_unaligned_print, 0, "");

SYSCTL_INT(_machdep, CPU_UNALIGNED_FIX, unaligned_fix,
	CTLFLAG_RW, &ia64_unaligned_fix, 0, "");

SYSCTL_INT(_machdep, CPU_UNALIGNED_SIGBUS, unaligned_sigbus,
	CTLFLAG_RW, &ia64_unaligned_sigbus, 0, "");

static void cpu_startup __P((void *));
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

/* Kernel message buffer; located at the top of the last memory chunk. */
struct msgbuf *msgbufp=0;

int bootverbose = 0, Maxmem = 0;
long dumplo;

/* Memory accounting, in pages, gathered while scanning memory descriptors. */
int	totalphysmem;		/* total amount of physical memory in system */
int	physmem;		/* physical memory used by NetBSD + some rsvd */
int	resvmem;		/* amount of memory reserved for PROM */
int	unusedmem;		/* amount of memory for OS that we don't use */
int	unknownmem;		/* amount of memory with an unknown use */
int	ncpus;			/* number of cpus */

/* start/end pairs of usable physical memory; a zero entry terminates. */
vm_offset_t phys_avail[10];
131
132static int
133sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
134{
135	int error = sysctl_handle_int(oidp, 0, ia64_ptob(physmem), req);
136	return (error);
137}
138
/* hw.physmem: total usable physical memory, in bytes. */
SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "I", "");
141
142static int
143sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
144{
145	int error = sysctl_handle_int(oidp, 0,
146		ia64_ptob(physmem - cnt.v_wire_count), req);
147	return (error);
148}
149
/* hw.usermem: physical memory minus wired pages, in bytes. */
SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "I", "");

/* hw.availpages: usable physical memory, in pages. */
SYSCTL_INT(_hw, OID_AUTO, availpages, CTLFLAG_RD, &physmem, 0, "");

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static void identifycpu __P((void));

/* Kernel virtual-address submap bookkeeping, filled by vm_ksubmap_init(). */
struct kva_md_info kmi;
161
/*
 * Late CPU startup, run from SYSINIT after VM is up: identify the CPU,
 * report memory, initialize kernel submaps and the buffer cache.
 * The large #if 0 region below is the not-yet-enabled i386/alpha-style
 * table sizing code, kept for reference.
 */
static void
cpu_startup(dummy)
	void *dummy;
{

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	identifycpu();

	/* startrtclock(); */
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory  = %ld (%ldK bytes)\n", ia64_ptob(Maxmem), ia64_ptob(Maxmem) / 1024);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
			    phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
		}
	}

	/* Set up the kernel's buffer/pager/exec submaps. */
	vm_ksubmap_init(&kmi);

#if 0
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);

	/*
	 * Discount the physical memory larger than the size of kernel_map
	 * to avoid eating up all of KVA space.
	 */
	if (kernel_map->first_free == NULL) {
		printf("Warning: no free entries in kernel_map.\n");
		physmem_est = physmem;
	} else
		physmem_est = min(physmem, btoc(kernel_map->max_offset -
		    kernel_map->min_offset));

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB. When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 */

	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / PAGE_SIZE;

		nbuf = 50;
		if (physmem_est > 1024)
			nbuf += min((physmem_est - 1024) / factor,
			    16384 / factor);
		if (physmem_est > 16384)
			nbuf += (physmem_est - 16384) * 2 / (factor * 5);
		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;
	}
	nswbuf = max(min(nbuf/4, 64), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	v = bufhashinit(v);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (vm_offset_t)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE));
	buffer_map->system_map = 1;
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*(ARG_MAX+(PAGE_SIZE*3))));

	/*
	 * Finally, allocate mbuf pool.
	 * XXX: Mbuf system machine-specific initializations should
	 *      go here, if anywhere.
	 */

	/*
	 * Initialize callouts
	 */
	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}

	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}

	mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
#endif

#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
	if (1)
#else
        if (boothowto & RB_CONFIG)
#endif
	{
		userconfig();
		cninit();	/* the preferred console may have changed */
	}
#endif

	printf("avail memory = %ld (%ldK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}
342
/*
 * Identify the CPU and fill in cpu_model for the hw.model sysctl.
 * Not yet implemented; cpu_model stays empty.
 */
static void
identifycpu(void)
{
	/* print cpu type & version */
}
348
/* Linker-provided symbols bounding the kernel image. */
extern char kernel_text[], _end[];

/* Enable verbose memory-descriptor logging in ia64_init(). */
#define DEBUG_MD
352
/*
 * Machine-dependent kernel bootstrap entry point.  Sets the FPU status
 * register, attaches the (temporary) ski console, locates the kernel
 * image, builds phys_avail[] from the (currently hard-coded ski) memory
 * descriptors, carves out the message buffer, sets up proc0's U-area
 * and per-CPU data, bootstraps pmap, initializes the core mutexes, and
 * parses the boot flags.  Nothing may printf before ssccnattach().
 */
void
ia64_init()
{
	int phys_avail_cnt;
	vm_offset_t kernstart, kernend;
	vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
	char *p;
	EFI_MEMORY_DESCRIPTOR ski_md[2]; /* XXX */
	EFI_MEMORY_DESCRIPTOR *mdp;
	int mdcount, i;

	/* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

	/*
	 * TODO: Disable interrupts, floating point etc.
	 * Maybe flush cache and tlb
	 */
	__asm __volatile("mov ar.fpsr=%0" :: "r"(IA64_FPSR_DEFAULT));

	/*
	 * TODO: Get critical system information (if possible, from the
	 * information provided by the boot program).
	 */

	/*
	 * Initalize the (temporary) bootstrap console interface, so
	 * we can use printf until the VM system starts being setup.
	 * The real console is initialized before then.
	 * TODO: I guess we start with a serial console here.
	 */
	ssccnattach();

	/* OUTPUT NOW ALLOWED */

	/*
	 * Find the beginning and end of the kernel.
	 */
	kernstart = trunc_page(kernel_text);
#ifdef DDBxx
	ksym_start = (void *)bootinfo.ssym;
	ksym_end   = (void *)bootinfo.esym;
	kernend = (vm_offset_t)round_page(ksym_end);
#else
	kernend = (vm_offset_t)round_page(_end);
#endif
	/* But if the bootstrap tells us otherwise, believe it! */
	if (bootinfo.kernend)
		kernend = round_page(bootinfo.kernend);
	preload_metadata = (caddr_t)bootinfo.modptr;
	if (envmode == 1)
		kern_envp = static_env;
	else
		kern_envp = bootinfo.envp;

	/* Init basic tunables, including hz */
	init_param();

	p = getenv("kernelname");
	if (p)
		strncpy(kernelname, p, sizeof(kernelname) - 1);

	/* Kernel extent as page frame numbers (region bits masked off). */
	kernstartpfn = atop(IA64_RR_MASK(kernstart));
	kernendpfn = atop(IA64_RR_MASK(kernend));

	/*
	 * Size the memory regions and load phys_avail[] with the results.
	 */

	/*
	 * XXX hack for ski. In reality, the loader will probably ask
	 * EFI and pass the results to us. Possibly, we will call EFI
	 * directly.
	 */
	ski_md[0].Type = EfiConventionalMemory;
	ski_md[0].PhysicalStart = 2L*1024*1024;
	ski_md[0].VirtualStart = 0;
	ski_md[0].NumberOfPages = (64L*1024*1024)>>12;
	ski_md[0].Attribute = EFI_MEMORY_WB;

	ski_md[1].Type = EfiConventionalMemory;
	ski_md[1].PhysicalStart = 4096L*1024*1024;
	ski_md[1].VirtualStart = 0;
	ski_md[1].NumberOfPages = (32L*1024*1024)>>12;
	ski_md[1].Attribute = EFI_MEMORY_WB;

	mdcount = 1;		/* ignore the high memory for now */

	/*
	 * Find out how much memory is available, by looking at
	 * the memory descriptors.
	 */
#ifdef DEBUG_MD
	printf("Memory descriptor count: %d\n", mdcount);
#endif

	phys_avail_cnt = 0;
	for (i = 0; i < mdcount; i++) {
		mdp = &ski_md[i];
#ifdef DEBUG_MD
		printf("MD %d: type %d pa 0x%lx cnt 0x%lx\n", i,
		       mdp->Type,
		       mdp->PhysicalStart,
		       mdp->NumberOfPages);
#endif
		totalphysmem += mdp->NumberOfPages;

		if (mdp->Type != EfiConventionalMemory) {
			resvmem += mdp->NumberOfPages;
			continue;
		}

		/*
		 * We have a memory descriptors available for system
		 * software use.  We must determine if this cluster
		 * holds the kernel.
		 */
		physmem += mdp->NumberOfPages;
		pfn0 = atop(mdp->PhysicalStart);
		pfn1 = pfn0 + mdp->NumberOfPages;
		if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
			/*
			 * Must compute the location of the kernel
			 * within the segment.
			 */
#ifdef DEBUG_MD
			printf("Descriptor %d contains kernel\n", i);
#endif
			if (pfn0 < kernstartpfn) {
				/*
				 * There is a chunk before the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk before kernel: "
				       "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
				phys_avail_cnt += 2;
			}
			if (kernendpfn < pfn1) {
				/*
				 * There is a chunk after the kernel.
				 */
#ifdef DEBUG_MD
				printf("Loading chunk after kernel: "
				       "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
				phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
				phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
				phys_avail_cnt += 2;
			}
		} else {
			/*
			 * Just load this cluster as one chunk.
			 */
#ifdef DEBUG_MD
			printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
			       pfn0, pfn1);
#endif
			phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
			phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
			phys_avail_cnt += 2;

		}
	}
	/* Terminate the phys_avail[] list with a zero entry. */
	phys_avail[phys_avail_cnt] = 0;

	Maxmem = physmem;

	/*
	 * Initialize error message buffer (at end of core).
	 * The buffer is stolen from the top of the last phys_avail
	 * chunk and mapped through region 7 (direct-mapped).
	 */
	{
		size_t sz = round_page(MSGBUF_SIZE);
		int i = phys_avail_cnt - 2;

		/* shrink so that it'll fit in the last segment */
		if (phys_avail[i+1] - phys_avail[i] < sz)
			sz = phys_avail[i+1] - phys_avail[i];

		phys_avail[i+1] -= sz;
		msgbufp = (struct msgbuf*) IA64_PHYS_TO_RR7(phys_avail[i+1]);

		msgbufinit(msgbufp, sz);

		/* Remove the last segment if it now has no pages. */
		if (phys_avail[i] == phys_avail[i+1])
			phys_avail[i] = 0;

		/* warn if the message buffer had to be shrunk */
		if (sz != round_page(MSGBUF_SIZE))
			printf("WARNING: %ld bytes not available for msgbuf in last cluster (%ld used)\n",
			    round_page(MSGBUF_SIZE), sz);

	}

	/*
	 * Init mapping for u page(s) for proc 0
	 */
	proc0paddr = proc0.p_addr =
	    (struct user *)pmap_steal_memory(UPAGES * PAGE_SIZE);

	/*
	 * Setup the global data for the bootstrap cpu.
	 * The per-CPU pointer is published via kernel register k4.
	 */
	{
		size_t sz = round_page(UPAGES * PAGE_SIZE);
		globalp = (struct globaldata *) pmap_steal_memory(sz);
		globaldata_init(globalp, 0, sz);
		ia64_set_k4((u_int64_t) globalp);
		PCPU_GET(next_asn) = 1;	/* 0 used for proc0 pmap */
	}

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Initialize the rest of proc 0's PCB.
	 *
	 * Set the kernel sp, reserving space for an (empty) trapframe,
	 * and make proc0's trapframe pointer point to it for sanity.
	 * Initialise proc0's backing store to start after u area.
	 */
	proc0.p_addr->u_pcb.pcb_sp =
	    (u_int64_t)proc0.p_addr + USPACE - sizeof(struct trapframe) - 16;
	proc0.p_addr->u_pcb.pcb_bspstore = (u_int64_t) (proc0.p_addr + 1);
	proc0.p_frame =
	    (struct trapframe *)(proc0.p_addr->u_pcb.pcb_sp + 16);

	/* Setup curproc so that mutexes work */
	PCPU_SET(curproc, &proc0);
	PCPU_SET(spinlocks, NULL);

	LIST_INIT(&proc0.p_contested);

	/*
	 * Initialise mutexes.
	 */
	mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", MTX_DEF);
	mtx_lock(&Giant);

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = 0;
#ifdef KADB
	boothowto |= RB_KDB;
#endif
/*	boothowto |= RB_KDB | RB_GDB; */
	for (p = bootinfo.boot_flags; p && *p != '\0'; p++) {
		/*
		 * Note that we'd really like to differentiate case here,
		 * but the Ia64 AXP Architecture Reference Manual
		 * says that we shouldn't.
		 * NOTE(review): the comment above was inherited from the
		 * Alpha port; no such ia64 manual exists.
		 */
		switch (*p) {
		case 'a': /* autoboot */
		case 'A':
			boothowto &= ~RB_SINGLE;
			break;

#ifdef DEBUG
		case 'c': /* crash dump immediately after autoconfig */
		case 'C':
			boothowto |= RB_DUMP;
			break;
#endif

#if defined(DDB)
		case 'd': /* break into the kernel debugger ASAP */
		case 'D':
			boothowto |= RB_KDB;
			break;
		case 'g': /* use kernel gdb */
		case 'G':
			boothowto |= RB_GDB;
			break;
#endif

		case 'h': /* always halt, never reboot */
		case 'H':
			boothowto |= RB_HALT;
			break;

#if 0
		case 'm': /* mini root present in memory */
		case 'M':
			boothowto |= RB_MINIROOT;
			break;
#endif

		case 'n': /* askname */
		case 'N':
			boothowto |= RB_ASKNAME;
			break;

		case 's': /* single-user (default, supported for sanity) */
		case 'S':
			boothowto |= RB_SINGLE;
			break;

		case 'v':
		case 'V':
			boothowto |= RB_VERBOSE;
			bootverbose = 1;
			break;

		default:
			printf("Unrecognized boot flag '%c'.\n", *p);
			break;
		}
	}

	/*
	 * Catch case of boot_verbose set in environment.
	 */
	if ((p = getenv("boot_verbose")) != NULL) {
		if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
			boothowto |= RB_VERBOSE;
			bootverbose = 1;
		}
	}

	/*
	 * Force single-user for a while.
	 */
	boothowto |= RB_SINGLE;

	/*
	 * Initialize debuggers, and break into them if appropriate.
	 */
#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB) {
		printf("Boot flags requested debugger\n");
		breakpoint();
	}
#endif
}
696
/*
 * Fill a buffer with zero bytes.
 *
 * Byte-fills up to the first word-aligned address, clears eight
 * words per iteration while possible, then single words, then any
 * trailing bytes.
 */
void
bzero(void *buf, size_t len)
{
	char *cp = buf;

	/* Byte stores until cp reaches a word boundary. */
	while (len != 0 &&
	    ((unsigned long)cp & (sizeof(unsigned long) - 1)) != 0) {
		*cp++ = 0;
		len--;
	}

	/* Unrolled: eight words per pass. */
	while (len >= 8 * sizeof(unsigned long)) {
		unsigned long *wp = (unsigned long *)cp;

		wp[0] = 0;
		wp[1] = 0;
		wp[2] = 0;
		wp[3] = 0;
		wp[4] = 0;
		wp[5] = 0;
		wp[6] = 0;
		wp[7] = 0;
		cp += 8 * sizeof(unsigned long);
		len -= 8 * sizeof(unsigned long);
	}

	/* One word at a time. */
	while (len >= sizeof(unsigned long)) {
		*(unsigned long *)cp = 0;
		cp += sizeof(unsigned long);
		len -= sizeof(unsigned long);
	}

	/* Trailing bytes. */
	while (len != 0) {
		*cp++ = 0;
		len--;
	}
}
728
/*
 * Busy-wait for n microseconds.  Not yet implemented: returns
 * immediately, so callers currently get no delay at all.
 */
void
DELAY(int n)
{
    /* TODO */
}
734
735/*
736 * Send an interrupt to process.
737 *
738 * Stack is set up to allow sigcode stored
739 * at top to call routine, followed by kcall
740 * to sigreturn routine below.  After sigreturn
741 * resets the signal mask, the stack, and the
742 * frame pointer, it returns to the user
743 * specified pc, psl.
744 */
745void
746sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
747{
748	struct proc *p = curproc;
749	struct trapframe *frame;
750	struct sigacts *psp;
751	struct sigframe sf, *sfp;
752	u_int64_t sbs = 0;
753	int oonstack, rndfsize;
754
755	PROC_LOCK(p);
756	psp = p->p_sigacts;
757	frame = p->p_frame;
758	oonstack = sigonstack(frame->tf_r[FRAME_SP]);
759	rndfsize = ((sizeof(sf) + 15) / 16) * 16;
760
761	/*
762	 * Make sure that we restore the entire trapframe after a
763	 * signal.
764	 */
765	frame->tf_flags &= ~FRAME_SYSCALL;
766
767	/* save user context */
768	bzero(&sf, sizeof(struct sigframe));
769	sf.sf_uc.uc_sigmask = *mask;
770	sf.sf_uc.uc_stack = p->p_sigstk;
771	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
772	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
773	sf.sf_uc.uc_mcontext.mc_flags = IA64_MC_FLAG_ONSTACK;
774	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
775
776	sf.sf_uc.uc_mcontext.mc_nat     = 0; /* XXX */
777	sf.sf_uc.uc_mcontext.mc_sp	= frame->tf_r[FRAME_SP];
778	sf.sf_uc.uc_mcontext.mc_ip	= (frame->tf_cr_iip
779					   | ((frame->tf_cr_ipsr >> 41) & 3));
780	sf.sf_uc.uc_mcontext.mc_cfm     = frame->tf_cr_ifs & ~(1<<31);
781	sf.sf_uc.uc_mcontext.mc_um      = frame->tf_cr_ipsr & 0x1fff;
782	sf.sf_uc.uc_mcontext.mc_ar_rsc  = frame->tf_ar_rsc;
783	sf.sf_uc.uc_mcontext.mc_ar_bsp  = frame->tf_ar_bspstore;
784	sf.sf_uc.uc_mcontext.mc_ar_rnat = frame->tf_ar_rnat;
785	sf.sf_uc.uc_mcontext.mc_ar_ccv  = frame->tf_ar_ccv;
786	sf.sf_uc.uc_mcontext.mc_ar_unat = frame->tf_ar_unat;
787	sf.sf_uc.uc_mcontext.mc_ar_fpsr = frame->tf_ar_fpsr;
788	sf.sf_uc.uc_mcontext.mc_ar_pfs  = frame->tf_ar_pfs;
789	sf.sf_uc.uc_mcontext.mc_pr      = frame->tf_pr;
790
791	bcopy(&frame->tf_b[0],
792	      &sf.sf_uc.uc_mcontext.mc_br[0],
793	      8 * sizeof(unsigned long));
794	sf.sf_uc.uc_mcontext.mc_gr[0] = 0;
795	bcopy(&frame->tf_r[0],
796	      &sf.sf_uc.uc_mcontext.mc_gr[1],
797	      31 * sizeof(unsigned long));
798
799	/* XXX mc_fr[] */
800
801	/*
802	 * Allocate and validate space for the signal handler
803	 * context. Note that if the stack is in P0 space, the
804	 * call to grow() is a nop, and the useracc() check
805	 * will fail if the process has not already allocated
806	 * the space with a `brk'.
807	 */
808	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
809	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
810		sbs = (u_int64_t) p->p_sigstk.ss_sp;
811		sfp = (struct sigframe *)((caddr_t)p->p_sigstk.ss_sp +
812		    p->p_sigstk.ss_size - rndfsize);
813		/*
814		 * Align sp and bsp.
815		 */
816		sbs = (sbs + 15) & ~15;
817		sfp = (struct sigframe *)((u_int64_t)sfp & ~15);
818#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
819		p->p_sigstk.ss_flags |= SS_ONSTACK;
820#endif
821	} else
822		sfp = (struct sigframe *)(frame->tf_r[FRAME_SP] - rndfsize);
823	PROC_UNLOCK(p);
824
825	(void)grow_stack(p, (u_long)sfp);
826#ifdef DEBUG
827	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
828		printf("sendsig(%d): sig %d ssp %p usp %p\n", p->p_pid,
829		       sig, &sf, sfp);
830#endif
831	if (!useracc((caddr_t)sfp, sizeof(sf), VM_PROT_WRITE)) {
832#ifdef DEBUG
833		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
834			printf("sendsig(%d): useracc failed on sig %d\n",
835			       p->p_pid, sig);
836#endif
837		/*
838		 * Process has trashed its stack; give it an illegal
839		 * instruction to halt it in its tracks.
840		 */
841		PROC_LOCK(p);
842		SIGACTION(p, SIGILL) = SIG_DFL;
843		SIGDELSET(p->p_sigignore, SIGILL);
844		SIGDELSET(p->p_sigcatch, SIGILL);
845		SIGDELSET(p->p_sigmask, SIGILL);
846		psignal(p, SIGILL);
847		PROC_UNLOCK(p);
848		return;
849	}
850
851#if 0
852	/* save the floating-point state, if necessary, then copy it. */
853	ia64_fpstate_save(p, 1);
854	sf.sf_uc.uc_mcontext.mc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
855	bcopy(&p->p_addr->u_pcb.pcb_fp,
856	      (struct fpreg *)sf.sf_uc.uc_mcontext.mc_fpregs,
857	      sizeof(struct fpreg));
858	sf.sf_uc.uc_mcontext.mc_fp_control = p->p_addr->u_pcb.pcb_fp_control;
859#endif
860
861	/*
862	 * copy the frame out to userland.
863	 */
864	(void) copyout((caddr_t)&sf, (caddr_t)sfp, sizeof(sf));
865#ifdef DEBUG
866	if (sigdebug & SDB_FOLLOW)
867		printf("sendsig(%d): sig %d sfp %p code %lx\n", p->p_pid, sig,
868		    sfp, code);
869#endif
870
871	/*
872	 * Set up the registers to return to sigcode.
873	 */
874	frame->tf_cr_ipsr &= ~IA64_PSR_RI;
875	frame->tf_cr_iip = PS_STRINGS - (esigcode - sigcode);
876	frame->tf_r[FRAME_R1] = sig;
877	PROC_LOCK(p);
878	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
879		frame->tf_r[FRAME_R15] = (u_int64_t)&(sfp->sf_si);
880
881		/* Fill in POSIX parts */
882		sf.sf_si.si_signo = sig;
883		sf.sf_si.si_code = code;
884		sf.sf_si.si_addr = (void*)frame->tf_cr_ifa;
885	}
886	else
887		frame->tf_r[FRAME_R15] = code;
888	PROC_UNLOCK(p);
889
890	frame->tf_r[FRAME_SP] = (u_int64_t)sfp - 16;
891	frame->tf_r[FRAME_R14] = sig;
892	frame->tf_r[FRAME_R15] = (u_int64_t) &sfp->sf_si;
893	frame->tf_r[FRAME_R16] = (u_int64_t) &sfp->sf_uc;
894	frame->tf_r[FRAME_R17] = (u_int64_t)catcher;
895	frame->tf_r[FRAME_R18] = sbs;
896
897#ifdef DEBUG
898	if (sigdebug & SDB_FOLLOW)
899		printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid,
900		    frame->tf_cr_iip, frame->tf_regs[FRAME_R4]);
901	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
902		printf("sendsig(%d): sig %d returns\n",
903		    p->p_pid, sig);
904#endif
905}
906
907/*
908 * System call to cleanup state after a signal
909 * has been taken.  Reset signal mask and
910 * stack state from context left by sendsig (above).
911 * Return to previous pc and psl as specified by
912 * context left by sendsig. Check carefully to
913 * make sure that the user has not modified the
914 * state to gain improper privileges.
915 */
916#ifdef COMPAT_43
/*
 * Old 4.3BSD-style sigreturn(2) (COMPAT_43).  Not supported on ia64;
 * always fails with EOPNOTSUPP.
 */
int
osigreturn(struct proc *p,
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap)
{
	return EOPNOTSUPP;
}
925#endif
926
927/*
928 * System call to cleanup state after a signal
929 * has been taken.  Reset signal mask and
930 * stack state from context left by sendsig (above).
931 * Return to previous pc and psl as specified by
932 * context left by sendsig. Check carefully to
933 * make sure that the user has not modified the
934 * state to gain improper privileges.
935 */
936
/*
 * sigreturn(2): restore the user context saved by sendsig() and
 * return to the interrupted code via EJUSTRETURN (the trapframe is
 * reloaded wholesale, so no syscall return value is written back).
 * User-supplied values are sanitized where needed (ip slot bits,
 * user-mask bits of ipsr, user bits of ar.rsc).
 */
int
sigreturn(struct proc *p,
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap)
{
	ucontext_t uc, *ucp;
	struct pcb *pcb;
	struct trapframe *frame = p->p_frame;
	struct __mcontext *mcp;

	ucp = uap->sigcntxp;
	pcb = &p->p_addr->u_pcb;

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
	    printf("sigreturn: pid %d, scp %p\n", p->p_pid, ucp);
#endif

	/*
	 * Fetch the entire context structure at once for speed.
	 * We don't use a normal argument to simplify RSE handling.
	 */
	/* NOTE(review): the context address is taken from r4, not from
	 * uap->sigcntxp; sigcode is expected to place it there. */
	if (copyin((caddr_t)frame->tf_r[FRAME_R4],
		   (caddr_t)&uc, sizeof(ucontext_t)))
		return (EFAULT);

	/*
	 * Restore the user-supplied information
	 */
	mcp = &uc.uc_mcontext;
	bcopy(&mcp->mc_br[0], &frame->tf_b[0], 8*sizeof(u_int64_t));
	bcopy(&mcp->mc_gr[1], &frame->tf_r[0], 31*sizeof(u_int64_t));
	/* XXX mc_fr */

	frame->tf_flags &= ~FRAME_SYSCALL;
	/* Split mc_ip into the bundle address and the slot number. */
	frame->tf_cr_iip = mcp->mc_ip & ~15;
	frame->tf_cr_ipsr &= ~IA64_PSR_RI;
	switch (mcp->mc_ip & 15) {
	case 1:
		frame->tf_cr_ipsr |= IA64_PSR_RI_1;
		break;
	case 2:
		frame->tf_cr_ipsr |= IA64_PSR_RI_2;
		break;
	}
	/* Only the low 13 user-mask bits of ipsr come from userland. */
	frame->tf_cr_ipsr     = ((frame->tf_cr_ipsr & ~0x1fff)
				 | (mcp->mc_um & 0x1fff));
	frame->tf_pr          = mcp->mc_pr;
	frame->tf_ar_rsc      = (mcp->mc_ar_rsc & 3) | 12; /* user, loadrs=0 */
	frame->tf_ar_pfs      = mcp->mc_ar_pfs;
	frame->tf_cr_ifs      = mcp->mc_cfm | (1UL<<63);
	frame->tf_ar_bspstore = mcp->mc_ar_bsp;
	frame->tf_ar_rnat     = mcp->mc_ar_rnat;
	frame->tf_ndirty      = 0; /* assumes flushrs in sigcode */
	frame->tf_ar_unat     = mcp->mc_ar_unat;
	frame->tf_ar_ccv      = mcp->mc_ar_ccv;
	frame->tf_ar_fpsr     = mcp->mc_ar_fpsr;

	frame->tf_r[FRAME_SP] = mcp->mc_sp;

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (uc.uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	/* Restore the signal mask, never allowing SIGKILL/SIGSTOP in it. */
	p->p_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);

	/* XXX ksc.sc_ownedfp ? */
	ia64_fpstate_drop(p);
#if 0
	bcopy((struct fpreg *)uc.uc_mcontext.mc_fpregs,
	      &p->p_addr->u_pcb.pcb_fp, sizeof(struct fpreg));
	p->p_addr->u_pcb.pcb_fp_control =
		uc.uc_mcontext.mc_fp_control;
#endif

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
1025
1026/*
1027 * Machine dependent boot() routine
1028 *
1029 * I haven't seen anything to put here yet
1030 * Possibly some stuff might be grafted back here from boot()
1031 */
void
cpu_boot(int howto)
{
	/* Nothing machine-dependent to do at reboot time yet. */
}
1036
1037/*
1038 * Shutdown the CPU as much as possible
1039 */
void
cpu_halt(void)
{
    /* TODO */
    /* NOTE(review): currently returns; caller is expected to spin. */
}
1045
1046/*
1047 * Clear registers on exec
1048 */
/*
 * Clear registers on exec: reset the trapframe so the new image starts
 * at `entry` in user mode with a clean register file, stack pointer
 * `stack`, ps_strings pointer in r14, and an empty register stack
 * frame on a fresh backing store.
 */
void
setregs(struct proc *p, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *frame;

	frame = p->p_frame;

	/*
	 * Make sure that we restore the entire trapframe after an
	 * execve.
	 */
	frame->tf_flags &= ~FRAME_SYSCALL;

	bzero(frame->tf_r, sizeof(frame->tf_r));
	bzero(frame->tf_f, sizeof(frame->tf_f));
	frame->tf_cr_iip = entry;
	/* User-mode psr: translation on, interrupts on, FP-high disabled. */
	frame->tf_cr_ipsr = (IA64_PSR_IC
			     | IA64_PSR_I
			     | IA64_PSR_IT
			     | IA64_PSR_DT
			     | IA64_PSR_RT
			     | IA64_PSR_DFH
			     | IA64_PSR_BN
			     | IA64_PSR_CPL_USER);
	frame->tf_r[FRAME_SP] = stack;
	frame->tf_r[FRAME_R14] = ps_strings;

	/*
	 * Setup the new backing store and make sure the new image
	 * starts executing with an empty register stack frame.
	 */
	frame->tf_ar_bspstore = p->p_md.md_bspstore;
	frame->tf_ndirty = 0;
	frame->tf_cr_ifs = (1L<<63); /* ifm=0, v=1 */
	frame->tf_ar_rsc = 0xf;	/* user mode rsc */
	frame->tf_ar_fpsr = IA64_FPSR_DEFAULT;

	/* Discard any FP state inherited from the old image. */
	p->p_md.md_flags &= ~MDP_FPUSED;
	ia64_fpstate_drop(p);
}
1089
/*
 * Set the program counter of a traced process.  Not yet implemented;
 * returns success without changing anything.
 */
int
ptrace_set_pc(struct proc *p, unsigned long addr)
{
	/* TODO set pc in trapframe */
	return 0;
}
1096
/*
 * Arrange for a traced process to execute a single instruction.
 * Not yet implemented; returns success without changing anything.
 */
int
ptrace_single_step(struct proc *p)
{
	/* TODO arrange for user process to single step */
	return 0;
}
1103
1104int
1105ia64_pa_access(vm_offset_t pa)
1106{
1107	return VM_PROT_READ|VM_PROT_WRITE;
1108}
1109
/*
 * Copy the register state of process p into regs for ptrace/procfs.
 * Not yet implemented; returns success with regs untouched.
 * (Converted from a K&R old-style definition to an ANSI prototype
 * for consistency with the rest of this file.)
 */
int
fill_regs(struct proc *p, struct reg *regs)
{
	/* TODO copy trapframe to regs */
	return (0);
}
1118
/*
 * Install *regs into the process's trapframe for ptrace.
 * Unimplemented stub: always reports success without copying.
 * TODO: copy regs to the trapframe.
 */
int
set_regs(struct proc *p, struct reg *regs)
{
	/* TODO copy regs to trapframe */
	return (0);
}
1127
/*
 * Copy the process's fp state into *fpregs for ptrace/core dumps.
 * Any live high fp registers are first flushed to the pcb; the
 * copy-out itself is still TODO.
 */
int
fill_fpregs(struct proc *p, struct fpreg *fpregs)
{
	/* TODO copy fpu state to fpregs */
	ia64_fpstate_save(p, 0);

#if 0
	bcopy(&p->p_addr->u_pcb.pcb_fp, fpregs, sizeof *fpregs);
#endif
	return (0);
}
1141
/*
 * Install *fpregs as the process's fp state for ptrace.  Current fp
 * ownership is dropped since the state is about to be replaced
 * wholesale; the copy-in itself is still TODO.
 */
int
set_fpregs(struct proc *p, struct fpreg *fpregs)
{
	/* TODO copy fpregs fpu state */
	ia64_fpstate_drop(p);

#if 0
	bcopy(fpregs, &p->p_addr->u_pcb.pcb_fp, sizeof *fpregs);
#endif
	return (0);
}
1155
#ifndef DDB
/*
 * Stub Debugger() for kernels built without the DDB debugger:
 * just report the call instead of dropping into a debugger.
 */
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */
1163
1164#include <sys/disklabel.h>
1165
1166/*
1167 * Determine the size of the transfer, and make sure it is
1168 * within the boundaries of the partition. Adjust transfer
1169 * if needed, and signal errors or early completion.
1170 */
int
bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel)
{
	/*
	 * NOTE(review): all of the checking logic below is compiled
	 * out with "#if 0" (including the "bad:" label its gotos
	 * target), so this function currently always marks the
	 * request as errored and returns -1.  Confirm whether any
	 * caller reaches it on this platform before relying on it.
	 */
#if 0
        struct partition *p = lp->d_partitions + dkpart(bp->bio_dev);
        int labelsect = lp->d_partitions[0].p_offset;
        int maxsz = p->p_size,
                sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

        /* overwriting disk label ? */
        /* XXX should also protect bootstrap in first 8K */
        if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
            bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
            (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
                bp->bio_error = EROFS;
                goto bad;
        }

#if     defined(DOSBBSECTOR) && defined(notyet)
        /* overwriting master boot record? */
        if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR &&
            (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
                bp->bio_error = EROFS;
                goto bad;
        }
#endif

        /* beyond partition? */
        if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) {
                /* if exactly at end of disk, return an EOF */
                if (bp->bio_blkno == maxsz) {
                        bp->bio_resid = bp->bio_bcount;
                        return(0);
                }
                /* or truncate if part of it fits */
                sz = maxsz - bp->bio_blkno;
                if (sz <= 0) {
                        bp->bio_error = EINVAL;
                        goto bad;
                }
                bp->bio_bcount = sz << DEV_BSHIFT;
        }

        bp->bio_pblkno = bp->bio_blkno + p->p_offset;
        return(1);

bad:
#endif
        /* Only live path with the checks disabled: fail the request. */
        bp->bio_flags |= BIO_ERROR;
        return(-1);

}
1225
1226static int
1227sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
1228{
1229	int error;
1230	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
1231		req);
1232	if (!error && req->newptr)
1233		resettodr();
1234	return (error);
1235}
1236
/*
 * Machine-dependent clock knobs exported via sysctl.  Writing
 * machdep.adjkerntz also resets the time-of-day hardware through
 * sysctl_machdep_adjkerntz(); the other two are plain read/write
 * integer tunables.
 */
SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

/* Presumably non-zero forbids setting the RTC — confirm with clock code. */
SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

/* Presumably non-zero means the RTC keeps local time, not UTC — confirm. */
SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");
1245
1246void
1247ia64_fpstate_check(struct proc *p)
1248{
1249	if ((p->p_frame->tf_cr_ipsr & IA64_PSR_DFH) == 0)
1250		if (p != PCPU_GET(fpcurproc))
1251			panic("ia64_check_fpcurproc: bogus");
1252}
1253
1254/*
1255 * Save the high floating point state in the pcb. Use this to get
1256 * read-only access to the floating point state. If write is true, the
1257 * current fp process is cleared so that fp state can safely be
1258 * modified. The process will automatically reload the changed state
1259 * by generating a disabled fp trap.
1260 */
1261void
1262ia64_fpstate_save(struct proc *p, int write)
1263{
1264	if (p == PCPU_GET(fpcurproc)) {
1265		/*
1266		 * Save the state in the pcb.
1267		 */
1268		savehighfp(p->p_addr->u_pcb.pcb_highfp);
1269
1270		if (write) {
1271			p->p_frame->tf_cr_ipsr |= IA64_PSR_DFH;
1272			PCPU_SET(fpcurproc, NULL);
1273		}
1274	}
1275}
1276
1277/*
1278 * Relinquish ownership of the FP state. This is called instead of
1279 * ia64_save_fpstate() if the entire FP state is being changed
1280 * (e.g. on sigreturn).
1281 */
1282void
1283ia64_fpstate_drop(struct proc *p)
1284{
1285	if (p == PCPU_GET(fpcurproc)) {
1286		p->p_frame->tf_cr_ipsr |= IA64_PSR_DFH;
1287		PCPU_SET(fpcurproc, NULL);
1288	}
1289}
1290
1291/*
1292 * Switch the current owner of the fp state to p, reloading the state
1293 * from the pcb.
1294 */
1295void
1296ia64_fpstate_switch(struct proc *p)
1297{
1298	if (PCPU_GET(fpcurproc)) {
1299		/*
1300		 * Dump the old fp state if its valid.
1301		 */
1302		savehighfp(PCPU_GET(fpcurproc)->p_addr->u_pcb.pcb_highfp);
1303		PCPU_GET(fpcurproc)->p_frame->tf_cr_ipsr |= IA64_PSR_DFH;
1304	}
1305
1306	/*
1307	 * Remember the new FP owner and reload its state.
1308	 */
1309	PCPU_SET(fpcurproc, p);
1310	restorehighfp(p->p_addr->u_pcb.pcb_highfp);
1311	p->p_frame->tf_cr_ipsr &= ~IA64_PSR_DFH;
1312
1313	p->p_md.md_flags |= MDP_FPUSED;
1314}
1315
1316/*
1317 * Initialise a struct globaldata.
1318 */
/*
 * Initialise a per-CPU globaldata structure of sz bytes: zero it,
 * record the owning CPU id, and register it with the system.
 */
void
globaldata_init(struct globaldata *globaldata, int cpuid, size_t sz)
{
	bzero(globaldata, sz);
	globaldata->gd_cpuid = cpuid;
	globaldata_register(globaldata);
}
1326