/* machdep.c revision 297284 */
1/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/
2
3/*-
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 1994-1998 Mark Brinicombe.
6 * Copyright (c) 1994 Brini.
7 * All rights reserved.
8 *
9 * This code is derived from software written for Brini by Mark Brinicombe
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by Mark Brinicombe
22 *	for the NetBSD Project.
23 * 4. The name of the company nor the name of the author may be used to
24 *    endorse or promote products derived from this software without specific
25 *    prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * Machine dependant functions for kernel setup
40 *
41 * Created      : 17/09/94
42 * Updated	: 18/04/01 updated for new wscons
43 */
44
45#include "opt_compat.h"
46#include "opt_ddb.h"
47#include "opt_kstack_pages.h"
48#include "opt_platform.h"
49#include "opt_sched.h"
50#include "opt_timer.h"
51
52#include <sys/cdefs.h>
53__FBSDID("$FreeBSD: head/sys/arm/arm/machdep.c 297284 2016-03-26 06:55:55Z mmel $");
54
55#include <sys/param.h>
56#include <sys/proc.h>
57#include <sys/systm.h>
58#include <sys/bio.h>
59#include <sys/buf.h>
60#include <sys/bus.h>
61#include <sys/cons.h>
62#include <sys/cpu.h>
63#include <sys/efi.h>
64#include <sys/exec.h>
65#include <sys/imgact.h>
66#include <sys/kdb.h>
67#include <sys/kernel.h>
68#include <sys/ktr.h>
69#include <sys/linker.h>
70#include <sys/lock.h>
71#include <sys/malloc.h>
72#include <sys/msgbuf.h>
73#include <sys/mutex.h>
74#include <sys/pcpu.h>
75#include <sys/ptrace.h>
76#include <sys/reboot.h>
77#include <sys/rwlock.h>
78#include <sys/sched.h>
79#include <sys/signalvar.h>
80#include <sys/syscallsubr.h>
81#include <sys/sysctl.h>
82#include <sys/sysent.h>
83#include <sys/sysproto.h>
84#include <sys/uio.h>
85#include <sys/vdso.h>
86
87#include <vm/vm.h>
88#include <vm/pmap.h>
89#include <vm/vm_map.h>
90#include <vm/vm_object.h>
91#include <vm/vm_page.h>
92#include <vm/vm_pager.h>
93
94#include <machine/acle-compat.h>
95#include <machine/armreg.h>
96#include <machine/atags.h>
97#include <machine/cpu.h>
98#include <machine/cpuinfo.h>
99#include <machine/debug_monitor.h>
100#include <machine/db_machdep.h>
101#include <machine/devmap.h>
102#include <machine/frame.h>
103#include <machine/intr.h>
104#include <machine/machdep.h>
105#include <machine/md_var.h>
106#include <machine/metadata.h>
107#include <machine/pcb.h>
108#include <machine/physmem.h>
109#include <machine/platform.h>
110#include <machine/reg.h>
111#include <machine/trap.h>
112#include <machine/undefined.h>
113#include <machine/vfp.h>
114#include <machine/vmparam.h>
115#include <machine/sysarch.h>
116
117#ifdef FDT
118#include <contrib/libfdt/libfdt.h>
119#include <dev/fdt/fdt_common.h>
120#include <dev/ofw/openfirm.h>
121#endif
122
123#ifdef DDB
124#include <ddb/ddb.h>
125
126#if __ARM_ARCH >= 6
127
/*
 * DDB command "show cp15": dump the CP15 identification, control and
 * feature registers of the current CPU (ARMv6+ only).
 */
DB_SHOW_COMMAND(cp15, db_show_cp15)
{
	u_int reg;

	reg = cp15_midr_get();		/* Main ID register */
	db_printf("Cpu ID: 0x%08x\n", reg);
	/*
	 * NOTE(review): cp15_ctr_get() reads the Cache Type register; the
	 * label suggests CLIDR/CCSIDR — confirm which was intended.
	 */
	reg = cp15_ctr_get();
	db_printf("Current Cache Lvl ID: 0x%08x\n",reg);

	reg = cp15_sctlr_get();		/* System control register */
	db_printf("Ctrl: 0x%08x\n",reg);
	reg = cp15_actlr_get();		/* Auxiliary control register */
	db_printf("Aux Ctrl: 0x%08x\n",reg);

	/* CPUID feature registers. */
	reg = cp15_id_pfr0_get();
	db_printf("Processor Feat 0: 0x%08x\n", reg);
	reg = cp15_id_pfr1_get();
	db_printf("Processor Feat 1: 0x%08x\n", reg);
	reg = cp15_id_dfr0_get();
	db_printf("Debug Feat 0: 0x%08x\n", reg);
	reg = cp15_id_afr0_get();
	db_printf("Auxiliary Feat 0: 0x%08x\n", reg);
	reg = cp15_id_mmfr0_get();
	db_printf("Memory Model Feat 0: 0x%08x\n", reg);
	reg = cp15_id_mmfr1_get();
	db_printf("Memory Model Feat 1: 0x%08x\n", reg);
	reg = cp15_id_mmfr2_get();
	db_printf("Memory Model Feat 2: 0x%08x\n", reg);
	reg = cp15_id_mmfr3_get();
	db_printf("Memory Model Feat 3: 0x%08x\n", reg);
	reg = cp15_ttbr_get();		/* Translation table base */
	db_printf("TTB0: 0x%08x\n", reg);
}
161
/*
 * DDB command "show vtop <virt_addr>": translate a virtual address via
 * the CP15 ATS1CPR (stage-1, privileged-read) operation and print the
 * resulting Physical Address Register (PAR) value.
 */
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	u_int reg;

	if (have_addr) {
		cp15_ats1cpr_set(addr);	/* perform the translation */
		reg = cp15_par_get();	/* read the result (incl. fault bits) */
		db_printf("Physical address reg: 0x%08x\n",reg);
	} else
		db_printf("show vtop <virt_addr>\n");
}
173#endif /* __ARM_ARCH >= 6 */
174#endif /* DDB */
175
176#ifdef DEBUG
177#define	debugf(fmt, args...) printf(fmt, ##args)
178#else
179#define	debugf(fmt, args...)
180#endif
181
182#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
183    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
184    defined(COMPAT_FREEBSD9)
185#error FreeBSD/arm doesn't provide compatibility with releases prior to 10
186#endif
187
188struct pcpu __pcpu[MAXCPU];
189struct pcpu *pcpup = &__pcpu[0];
190
191static struct trapframe proc0_tf;
192uint32_t cpu_reset_address = 0;
193int cold = 1;
194vm_offset_t vector_page;
195
196int (*_arm_memcpy)(void *, void *, int, int) = NULL;
197int (*_arm_bzero)(void *, int, int) = NULL;
198int _min_memcpy_size = 0;
199int _min_bzero_size = 0;
200
201extern int *end;
202
203#ifdef FDT
204static char *loader_envp;
205
206vm_paddr_t pmap_pa;
207
208#if __ARM_ARCH >= 6
209vm_offset_t systempage;
210vm_offset_t irqstack;
211vm_offset_t undstack;
212vm_offset_t abtstack;
213#else
214/*
215 * This is the number of L2 page tables required for covering max
216 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
217 * stacks etc.), uprounded to be divisible by 4.
218 */
219#define KERNEL_PT_MAX	78
220
221static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
222
223struct pv_addr systempage;
224static struct pv_addr msgbufpv;
225struct pv_addr irqstack;
226struct pv_addr undstack;
227struct pv_addr abtstack;
228static struct pv_addr kernelstack;
229#endif
230#endif
231
232#if defined(LINUX_BOOT_ABI)
233#define LBABI_MAX_BANKS	10
234
235uint32_t board_id;
236struct arm_lbabi_tag *atag_list;
237char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
238char atags[LBABI_MAX_COMMAND_LINE * 2];
239uint32_t memstart[LBABI_MAX_BANKS];
240uint32_t memsize[LBABI_MAX_BANKS];
241uint32_t membanks;
242#endif
243
244static uint32_t board_revision;
245/* hex representation of uint64_t */
246static char board_serial[32];
247
248SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
249SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
250    &board_revision, 0, "Board revision");
251SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
252    board_serial, 0, "Board serial");
253
254int vfp_exists;
255SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
256    &vfp_exists, 0, "Floating point support enabled");
257
258void
259board_set_serial(uint64_t serial)
260{
261
262	snprintf(board_serial, sizeof(board_serial)-1,
263		    "%016jx", serial);
264}
265
266void
267board_set_revision(uint32_t revision)
268{
269
270	board_revision = revision;
271}
272
273void
274sendsig(catcher, ksi, mask)
275	sig_t catcher;
276	ksiginfo_t *ksi;
277	sigset_t *mask;
278{
279	struct thread *td;
280	struct proc *p;
281	struct trapframe *tf;
282	struct sigframe *fp, frame;
283	struct sigacts *psp;
284	struct sysentvec *sysent;
285	int onstack;
286	int sig;
287	int code;
288
289	td = curthread;
290	p = td->td_proc;
291	PROC_LOCK_ASSERT(p, MA_OWNED);
292	sig = ksi->ksi_signo;
293	code = ksi->ksi_code;
294	psp = p->p_sigacts;
295	mtx_assert(&psp->ps_mtx, MA_OWNED);
296	tf = td->td_frame;
297	onstack = sigonstack(tf->tf_usr_sp);
298
299	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
300	    catcher, sig);
301
302	/* Allocate and validate space for the signal handler context. */
303	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
304	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
305		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
306		    td->td_sigstk.ss_size);
307#if defined(COMPAT_43)
308		td->td_sigstk.ss_flags |= SS_ONSTACK;
309#endif
310	} else
311		fp = (struct sigframe *)td->td_frame->tf_usr_sp;
312
313	/* make room on the stack */
314	fp--;
315
316	/* make the stack aligned */
317	fp = (struct sigframe *)STACKALIGN(fp);
318	/* Populate the siginfo frame. */
319	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
320	frame.sf_si = ksi->ksi_info;
321	frame.sf_uc.uc_sigmask = *mask;
322	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK )
323	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
324	frame.sf_uc.uc_stack = td->td_sigstk;
325	mtx_unlock(&psp->ps_mtx);
326	PROC_UNLOCK(td->td_proc);
327
328	/* Copy the sigframe out to the user's stack. */
329	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
330		/* Process has trashed its stack. Kill it. */
331		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
332		PROC_LOCK(p);
333		sigexit(td, SIGILL);
334	}
335
336	/*
337	 * Build context to run handler in.  We invoke the handler
338	 * directly, only returning via the trampoline.  Note the
339	 * trampoline version numbers are coordinated with machine-
340	 * dependent code in libc.
341	 */
342
343	tf->tf_r0 = sig;
344	tf->tf_r1 = (register_t)&fp->sf_si;
345	tf->tf_r2 = (register_t)&fp->sf_uc;
346
347	/* the trampoline uses r5 as the uc address */
348	tf->tf_r5 = (register_t)&fp->sf_uc;
349	tf->tf_pc = (register_t)catcher;
350	tf->tf_usr_sp = (register_t)fp;
351	sysent = p->p_sysent;
352	if (sysent->sv_sigcode_base != 0)
353		tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base;
354	else
355		tf->tf_usr_lr = (register_t)(sysent->sv_psstrings -
356		    *(sysent->sv_szsigcode));
357	/* Set the mode to enter in the signal handler */
358#if __ARM_ARCH >= 7
359	if ((register_t)catcher & 1)
360		tf->tf_spsr |= PSR_T;
361	else
362		tf->tf_spsr &= ~PSR_T;
363#endif
364
365	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
366	    tf->tf_usr_sp);
367
368	PROC_LOCK(p);
369	mtx_lock(&psp->ps_mtx);
370}
371
372struct kva_md_info kmi;
373
374/*
375 * arm32_vector_init:
376 *
377 *	Initialize the vector page, and select whether or not to
378 *	relocate the vectors.
379 *
380 *	NOTE: We expect the vector page to be mapped at its expected
381 *	destination.
382 */
383
384extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (int *) va;
	/*
	 * The per-vector data words sit at the same offset within the
	 * destination page as page0_data sits within page0.
	 */
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}
430
/*
 * SI_SUB_CPU initialization: print the memory layout, set up the kernel
 * submaps and buffer cache, and finish initializing thread0's PCB.
 */
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE)
	vm_page_t m;
#endif

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory  = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		arm_devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	/* Record thread0's kernel-mode stack pointer in its PCB. */
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
#if __ARM_ARCH < 6
	/* Write-protect the vector page now that it is populated. */
	vector_page_setprot(VM_PROT_READ);
	pmap_postinit();
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	/*
	 * NOTE(review): m is used without a NULL check — presumably
	 * page allocation cannot fail this early in boot; confirm.
	 */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	/* Initialize the ARM_RAS_* region markers. */
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}
477
478SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
479
480/*
481 * Flush the D-cache for non-DMA I/O so that the I-cache can
482 * be made coherent later.
483 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* Write the VA range back to the point of coherency. */
	dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
}
490
491/* Get current clock frequency for the given cpu id. */
/*
 * Get current clock frequency for the given cpu id.  Not implemented on
 * this platform, so every query reports ENXIO.
 */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}
498
/*
 * Per-CPU idle loop body: optionally idle the eventtimer clock, then
 * put the CPU to sleep until the next interrupt.
 */
void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_idleclock();
#endif
	/* Only sleep if nothing became runnable meanwhile. */
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_activeclock();
#endif
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}
518
/*
 * Wake a CPU from cpu_idle().  Nothing to do here: the sleeping CPU is
 * woken by the interrupt itself.  Returns 0 (not handled).
 */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
525
526/*
527 * Most ARM platforms don't need to do anything special to init their clocks
528 * (they get intialized during normal device attachment), and by not defining a
529 * cpu_initclocks() function they get this generic one.  Any platform that needs
530 * to do something special can just provide their own implementation, which will
531 * override this one due to the weak linkage.
532 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
	/* The boot CPU does full clock setup; APs attach only their own. */
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
#endif
}
548__weak_reference(arm_generic_initclocks, cpu_initclocks);
549
550int
551fill_regs(struct thread *td, struct reg *regs)
552{
553	struct trapframe *tf = td->td_frame;
554	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
555	regs->r_sp = tf->tf_usr_sp;
556	regs->r_lr = tf->tf_usr_lr;
557	regs->r_pc = tf->tf_pc;
558	regs->r_cpsr = tf->tf_spsr;
559	return (0);
560}
561int
562fill_fpregs(struct thread *td, struct fpreg *regs)
563{
564	bzero(regs, sizeof(*regs));
565	return (0);
566}
567
568int
569set_regs(struct thread *td, struct reg *regs)
570{
571	struct trapframe *tf = td->td_frame;
572
573	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
574	tf->tf_usr_sp = regs->r_sp;
575	tf->tf_usr_lr = regs->r_lr;
576	tf->tf_pc = regs->r_pc;
577	tf->tf_spsr &=  ~PSR_FLAGS;
578	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
579	return (0);
580}
581
/*
 * Install FP register state.  Nothing is tracked here; silently accept.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{

	return (0);
}
587
/*
 * Export debug register state.  None is tracked here; report success.
 */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}
/*
 * Install debug register state.  None is tracked here; silently accept.
 */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}
598
599
600static int
601ptrace_read_int(struct thread *td, vm_offset_t addr, uint32_t *v)
602{
603
604	if (proc_readmem(td, td->td_proc, addr, v, sizeof(*v)) != sizeof(*v))
605		return (ENOMEM);
606	return (0);
607}
608
609static int
610ptrace_write_int(struct thread *td, vm_offset_t addr, uint32_t v)
611{
612
613	if (proc_writemem(td, td->td_proc, addr, &v, sizeof(v)) != sizeof(v))
614		return (ENOMEM);
615	return (0);
616}
617
618static u_int
619ptrace_get_usr_reg(void *cookie, int reg)
620{
621	int ret;
622	struct thread *td = cookie;
623
624	KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)),
625	 ("reg is outside range"));
626
627	switch(reg) {
628	case ARM_REG_NUM_PC:
629		ret = td->td_frame->tf_pc;
630		break;
631	case ARM_REG_NUM_LR:
632		ret = td->td_frame->tf_usr_lr;
633		break;
634	case ARM_REG_NUM_SP:
635		ret = td->td_frame->tf_usr_sp;
636		break;
637	default:
638		ret = *((register_t*)&td->td_frame->tf_r0 + reg);
639		break;
640	}
641
642	return (ret);
643}
644
645static u_int
646ptrace_get_usr_int(void* cookie, vm_offset_t offset, u_int* val)
647{
648	struct thread *td = cookie;
649	u_int error;
650
651	error = ptrace_read_int(td, offset, val);
652
653	return (error);
654}
655
656/**
657 * This function parses current instruction opcode and decodes
658 * any possible jump (change in PC) which might occur after
659 * the instruction is executed.
660 *
661 * @param     td                Thread structure of analysed task
662 * @param     cur_instr         Currently executed instruction
663 * @param     alt_next_address  Pointer to the variable where
664 *                              the destination address of the
665 *                              jump instruction shall be stored.
666 *
667 * @return    <0>               when jump is possible
668 *            <EINVAL>          otherwise
669 */
670static int
671ptrace_get_alternative_next(struct thread *td, uint32_t cur_instr,
672    uint32_t *alt_next_address)
673{
674	int error;
675
676	if (inst_branch(cur_instr) || inst_call(cur_instr) ||
677	    inst_return(cur_instr)) {
678		error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc,
679		    alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int);
680
681		return (error);
682	}
683
684	return (EINVAL);
685}
686
687int
688ptrace_single_step(struct thread *td)
689{
690	struct proc *p;
691	int error, error_alt;
692	uint32_t cur_instr, alt_next = 0;
693
694	/* TODO: This needs to be updated for Thumb-2 */
695	if ((td->td_frame->tf_spsr & PSR_T) != 0)
696		return (EINVAL);
697
698	KASSERT(td->td_md.md_ptrace_instr == 0,
699	 ("Didn't clear single step"));
700	KASSERT(td->td_md.md_ptrace_instr_alt == 0,
701	 ("Didn't clear alternative single step"));
702	p = td->td_proc;
703	PROC_UNLOCK(p);
704
705	error = ptrace_read_int(td, td->td_frame->tf_pc,
706	    &cur_instr);
707	if (error)
708		goto out;
709
710	error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE,
711	    &td->td_md.md_ptrace_instr);
712	if (error == 0) {
713		error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE,
714		    PTRACE_BREAKPOINT);
715		if (error) {
716			td->td_md.md_ptrace_instr = 0;
717		} else {
718			td->td_md.md_ptrace_addr = td->td_frame->tf_pc +
719			    INSN_SIZE;
720		}
721	}
722
723	error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next);
724	if (error_alt == 0) {
725		error_alt = ptrace_read_int(td, alt_next,
726		    &td->td_md.md_ptrace_instr_alt);
727		if (error_alt) {
728			td->td_md.md_ptrace_instr_alt = 0;
729		} else {
730			error_alt = ptrace_write_int(td, alt_next,
731			    PTRACE_BREAKPOINT);
732			if (error_alt)
733				td->td_md.md_ptrace_instr_alt = 0;
734			else
735				td->td_md.md_ptrace_addr_alt = alt_next;
736		}
737	}
738
739out:
740	PROC_LOCK(p);
741	return ((error != 0) && (error_alt != 0));
742}
743
/*
 * Undo ptrace_single_step(): restore the original instruction(s) that
 * were replaced by breakpoints.  The proc lock is dropped around each
 * user-memory write, mirroring ptrace_single_step().
 */
int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
		return (EINVAL);

	/* Restore the sequential-next-instruction breakpoint, if set. */
	if (td->td_md.md_ptrace_instr != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}

	/* Restore the branch-target breakpoint, if set. */
	if (td->td_md.md_ptrace_instr_alt != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr_alt,
		    td->td_md.md_ptrace_instr_alt);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr_alt = 0;
	}

	return (0);
}
773
774int
775ptrace_set_pc(struct thread *td, unsigned long addr)
776{
777	td->td_frame->tf_pc = addr;
778	return (0);
779}
780
/*
 * Machine-dependent per-CPU data initialization hook.  No MD pcpu state
 * needs setting up on this platform.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}
785
/*
 * Enter a spinlock section: disable IRQ and FIQ on first entry and keep
 * a per-thread nesting count so nested sections stay cheap.  Paired
 * with spinlock_exit().
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Outermost entry: mask interrupts and save prior state. */
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
801
/*
 * Leave a spinlock section; the saved interrupt state is restored only
 * when the outermost section is exited (nesting count reaches zero).
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	/* Snapshot the saved state before decrementing the count. */
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}
815
816/*
817 * Clear registers on exec
818 */
819void
820exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
821{
822	struct trapframe *tf = td->td_frame;
823
824	memset(tf, 0, sizeof(*tf));
825	tf->tf_usr_sp = stack;
826	tf->tf_usr_lr = imgp->entry_addr;
827	tf->tf_svc_lr = 0x77777777;
828	tf->tf_pc = imgp->entry_addr;
829	tf->tf_spsr = PSR_USR32_MODE;
830}
831
832/*
833 * Get machine context.
834 */
835int
836get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
837{
838	struct trapframe *tf = td->td_frame;
839	__greg_t *gr = mcp->__gregs;
840
841	if (clear_ret & GET_MC_CLEAR_RET) {
842		gr[_REG_R0] = 0;
843		gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
844	} else {
845		gr[_REG_R0]   = tf->tf_r0;
846		gr[_REG_CPSR] = tf->tf_spsr;
847	}
848	gr[_REG_R1]   = tf->tf_r1;
849	gr[_REG_R2]   = tf->tf_r2;
850	gr[_REG_R3]   = tf->tf_r3;
851	gr[_REG_R4]   = tf->tf_r4;
852	gr[_REG_R5]   = tf->tf_r5;
853	gr[_REG_R6]   = tf->tf_r6;
854	gr[_REG_R7]   = tf->tf_r7;
855	gr[_REG_R8]   = tf->tf_r8;
856	gr[_REG_R9]   = tf->tf_r9;
857	gr[_REG_R10]  = tf->tf_r10;
858	gr[_REG_R11]  = tf->tf_r11;
859	gr[_REG_R12]  = tf->tf_r12;
860	gr[_REG_SP]   = tf->tf_usr_sp;
861	gr[_REG_LR]   = tf->tf_usr_lr;
862	gr[_REG_PC]   = tf->tf_pc;
863
864	return (0);
865}
866
867/*
868 * Set machine context.
869 *
870 * However, we don't set any but the user modifiable flags, and we won't
871 * touch the cs selector.
872 */
873int
874set_mcontext(struct thread *td, mcontext_t *mcp)
875{
876	struct trapframe *tf = td->td_frame;
877	const __greg_t *gr = mcp->__gregs;
878
879	tf->tf_r0 = gr[_REG_R0];
880	tf->tf_r1 = gr[_REG_R1];
881	tf->tf_r2 = gr[_REG_R2];
882	tf->tf_r3 = gr[_REG_R3];
883	tf->tf_r4 = gr[_REG_R4];
884	tf->tf_r5 = gr[_REG_R5];
885	tf->tf_r6 = gr[_REG_R6];
886	tf->tf_r7 = gr[_REG_R7];
887	tf->tf_r8 = gr[_REG_R8];
888	tf->tf_r9 = gr[_REG_R9];
889	tf->tf_r10 = gr[_REG_R10];
890	tf->tf_r11 = gr[_REG_R11];
891	tf->tf_r12 = gr[_REG_R12];
892	tf->tf_usr_sp = gr[_REG_SP];
893	tf->tf_usr_lr = gr[_REG_LR];
894	tf->tf_pc = gr[_REG_PC];
895	tf->tf_spsr = gr[_REG_CPSR];
896
897	return (0);
898}
899
900/*
901 * MPSAFE
902 */
/*
 * sigreturn(2): restore the register context and signal mask saved by
 * sendsig(), after validating that userland did not tamper with the
 * privileged CPSR bits.
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	/* Fetch the ucontext the handler left on the user stack. */
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (PSR_I | PSR_F)) != 0)
		return (EINVAL);
	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
933
934
935/*
936 * Construct a PCB from a trapframe. This is called from kdb_trap() where
937 * we want to start a backtrace from the function that caused us to enter
938 * the debugger. We have the context in the trapframe, but base the trace
939 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
940 * enough for a backtrace.
941 */
942void
943makectx(struct trapframe *tf, struct pcb *pcb)
944{
945	pcb->pcb_regs.sf_r4 = tf->tf_r4;
946	pcb->pcb_regs.sf_r5 = tf->tf_r5;
947	pcb->pcb_regs.sf_r6 = tf->tf_r6;
948	pcb->pcb_regs.sf_r7 = tf->tf_r7;
949	pcb->pcb_regs.sf_r8 = tf->tf_r8;
950	pcb->pcb_regs.sf_r9 = tf->tf_r9;
951	pcb->pcb_regs.sf_r10 = tf->tf_r10;
952	pcb->pcb_regs.sf_r11 = tf->tf_r11;
953	pcb->pcb_regs.sf_r12 = tf->tf_r12;
954	pcb->pcb_regs.sf_pc = tf->tf_pc;
955	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
956	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
957}
958
959/*
960 * Fake up a boot descriptor table
961 */
/*
 * Build a minimal preload_metadata chain (kernel name/type/addr/size,
 * plus optional symbol-table and DTB records) when the boot loader did
 * not supply one.  Returns the first free address after the kernel and
 * any appended DTB.
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused, void *dtb_ptr,
    size_t dtb_size)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char*)&fake_preload[i++], "kernel");
	i += 1;		/* "kernel" + NUL spans two 32-bit words */
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i++], "elf kernel");
	i += 2;		/* "elf kernel" + NUL spans three 32-bit words */
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	/* Trampoline magic word implies ssym/esym follow at +4/+8. */
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	if (dtb_ptr != NULL) {
		/* Copy DTB to KVA space and insert it into module chain. */
		lastaddr = roundup(lastaddr, sizeof(int));
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_DTBP;
		fake_preload[i++] = sizeof(uint32_t);
		fake_preload[i++] = (uint32_t)lastaddr;
		memmove((void *)lastaddr, dtb_ptr, dtb_size);
		lastaddr += dtb_size;
		lastaddr = roundup(lastaddr, sizeof(int));
	}
	/* Two zero words terminate the metadata chain. */
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	init_static_kenv(NULL, 0);

	return (lastaddr);
}
1020
/*
 * Early boot-CPU pcpu setup: establish the curthread pointer and attach
 * thread0 before any code that relies on PCPU accessors runs.
 */
void
pcpu0_init(void)
{
#if __ARM_ARCH >= 6
	/* Seed the curthread pointer first so PCPU access works below. */
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
}
1030
1031#if defined(LINUX_BOOT_ABI)
/*
 * Parse Linux-boot-ABI parameters (either a DTB pointer or an ATAG
 * list in r2).  Returns the first free address after the kernel, or 0
 * if the registers do not match the Linux boot protocol.
 */
vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;
#ifdef FDT
	struct fdt_header *dtb_ptr;
	uint32_t dtb_size;
#endif

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is atags or dtb pointer.  If all of these aren't satisfied,
	 * then punt. Unfortunately, it looks like DT enabled kernels
	 * doesn't uses board type and U-Boot delivers 0 in r1 for them.
	 */
	if (abp->abp_r0 != 0 || abp->abp_r2 == 0)
		return (0);
#ifdef FDT
	/* Test if r2 point to valid DTB. */
	dtb_ptr = (struct fdt_header *)abp->abp_r2;
	if (fdt_check_header(dtb_ptr) == 0) {
		dtb_size = fdt_totalsize(dtb_ptr);
		return (fake_preload_metadata(abp, dtb_ptr, dtb_size));
	}
#endif
	/* Old, ATAG based boot must have board type set. */
	if (abp->abp_r1 == 0)
		return (0);

	board_id = abp->abp_r1;
	/* ATAGs were placed at a physical address; convert to KVA. */
	walker = (struct arm_lbabi_tag *)
	    (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr);

	/* A well-formed list must start with an ATAG_CORE record. */
	if (ATAG_TAG(walker) != ATAG_CORE)
		return 0;

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			serial = walker->u.tag_sn.low |
			    ((uint64_t)walker->u.tag_sn.high << 32);
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			/* XXX open question: Parse this for boothowto? */
			bcopy(walker->u.tag_cmd.command, linux_command_line,
			      ATAG_SIZE(walker));
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	init_static_kenv(NULL, 0);

	return fake_preload_metadata(abp, NULL, 0);
}
1109#endif
1110
1111#if defined(FREEBSD_BOOT_LOADER)
/*
 * Parse metadata handed over by the FreeBSD loader(8): r0 carries the
 * (page-aligned) preload metadata pointer.  Returns the address of the
 * end of the loaded kernel, or 0 if the metadata is absent/invalid.
 */
vm_offset_t
freebsd_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr = 0;
	void *mdp;
	void *kmdp;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	/*
	 * Mask metadata pointer: it is supposed to be on page boundary. If
	 * the first argument (mdp) doesn't point to a valid address the
	 * bootloader must have passed us something else than the metadata
	 * ptr, so we give up.  Also give up if we cannot find metadta section
	 * the loader creates that we get all this data out of.
	 */

	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
		return 0;
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return 0;

	/* Pull boot flags, environment and kernel end out of the chain. */
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	init_static_kenv(loader_envp, 0);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif
	return lastaddr;
}
1149#endif
1150
/*
 * Try each supported boot-parameter format in turn, falling back to
 * fabricated metadata when none matches.  Returns the first free
 * address after the kernel.
 */
vm_offset_t
default_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr;

#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
		return lastaddr;
#endif
	/* Fall back to hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp, NULL, 0);

	return lastaddr;
}
1169
1170/*
1171 * Stub version of the boot parameter parsing routine.  We are
1172 * called early in initarm, before even VM has been initialized.
1173 * This routine needs to preserve any data that the boot loader
1174 * has passed in before the kernel starts to grow past the end
1175 * of the BSS, traditionally the place boot-loaders put this data.
1176 *
 * Since this is called so early, before the things that depend on the vm
 * system being set up (including access to some SoC serial ports) are
 * available, about all that can be done in this routine is to copy the
 * arguments.
1180 *
1181 * This is the default boot parameter parsing routine.  Individual
1182 * kernels/boards can override this weak function with one of their
1183 * own.  We just fake metadata...
1184 */
1185__weak_reference(default_parse_boot_param, parse_boot_param);
1186
1187/*
1188 * Initialize proc0
1189 */
1190void
1191init_proc0(vm_offset_t kstack)
1192{
1193	proc_linkup0(&proc0, &thread0);
1194	thread0.td_kstack = kstack;
1195	thread0.td_pcb = (struct pcb *)
1196		(thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
1197	thread0.td_pcb->pcb_flags = 0;
1198	thread0.td_pcb->pcb_vfpcpu = -1;
1199	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
1200	thread0.td_frame = &proc0_tf;
1201	pcpup->pc_curpcb = thread0.td_pcb;
1202}
1203
1204int
1205arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc,
1206    u_int (*fetch_reg)(void*, int), u_int (*read_int)(void*, vm_offset_t, u_int*))
1207{
1208	u_int addr, nregs, offset = 0;
1209	int error = 0;
1210
1211	switch ((insn >> 24) & 0xf) {
1212	case 0x2:	/* add pc, reg1, #value */
1213	case 0x0:	/* add pc, reg1, reg2, lsl #offset */
1214		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
1215		if (((insn >> 16) & 0xf) == 15)
1216			addr += 8;
1217		if (insn & 0x0200000) {
1218			offset = (insn >> 7) & 0x1e;
1219			offset = (insn & 0xff) << (32 - offset) |
1220			    (insn & 0xff) >> offset;
1221		} else {
1222
1223			offset = fetch_reg(cookie, insn & 0x0f);
1224			if ((insn & 0x0000ff0) != 0x00000000) {
1225				if (insn & 0x10)
1226					nregs = fetch_reg(cookie,
1227					    (insn >> 8) & 0xf);
1228				else
1229					nregs = (insn >> 7) & 0x1f;
1230				switch ((insn >> 5) & 3) {
1231				case 0:
1232					/* lsl */
1233					offset = offset << nregs;
1234					break;
1235				case 1:
1236					/* lsr */
1237					offset = offset >> nregs;
1238					break;
1239				default:
1240					break; /* XXX */
1241				}
1242
1243			}
1244			*new_pc = addr + offset;
1245			return (0);
1246
1247		}
1248
1249	case 0xa:	/* b ... */
1250	case 0xb:	/* bl ... */
1251		addr = ((insn << 2) & 0x03ffffff);
1252		if (addr & 0x02000000)
1253			addr |= 0xfc000000;
1254		*new_pc = (pc + 8 + addr);
1255		return (0);
1256	case 0x7:	/* ldr pc, [pc, reg, lsl #2] */
1257		addr = fetch_reg(cookie, insn & 0xf);
1258		addr = pc + 8 + (addr << 2);
1259		error = read_int(cookie, addr, &addr);
1260		*new_pc = addr;
1261		return (error);
1262	case 0x1:	/* mov pc, reg */
1263		*new_pc = fetch_reg(cookie, insn & 0xf);
1264		return (0);
1265	case 0x4:
1266	case 0x5:	/* ldr pc, [reg] */
1267		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
1268		/* ldr pc, [reg, #offset] */
1269		if (insn & (1 << 24))
1270			offset = insn & 0xfff;
1271		if (insn & 0x00800000)
1272			addr += offset;
1273		else
1274			addr -= offset;
1275		error = read_int(cookie, addr, &addr);
1276		*new_pc = addr;
1277
1278		return (error);
1279	case 0x8:	/* ldmxx reg, {..., pc} */
1280	case 0x9:
1281		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
1282		nregs = (insn  & 0x5555) + ((insn  >> 1) & 0x5555);
1283		nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
1284		nregs = (nregs + (nregs >> 4)) & 0x0f0f;
1285		nregs = (nregs + (nregs >> 8)) & 0x001f;
1286		switch ((insn >> 23) & 0x3) {
1287		case 0x0:	/* ldmda */
1288			addr = addr - 0;
1289			break;
1290		case 0x1:	/* ldmia */
1291			addr = addr + 0 + ((nregs - 1) << 2);
1292			break;
1293		case 0x2:	/* ldmdb */
1294			addr = addr - 4;
1295			break;
1296		case 0x3:	/* ldmib */
1297			addr = addr + 4 + ((nregs - 1) << 2);
1298			break;
1299		}
1300		error = read_int(cookie, addr, &addr);
1301		*new_pc = addr;
1302
1303		return (error);
1304	default:
1305		return (EINVAL);
1306	}
1307}
1308
1309#if __ARM_ARCH >= 6
1310void
1311set_stackptrs(int cpu)
1312{
1313
1314	set_stackptr(PSR_IRQ32_MODE,
1315	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1316	set_stackptr(PSR_ABT32_MODE,
1317	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1318	set_stackptr(PSR_UND32_MODE,
1319	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1320}
1321#else
1322void
1323set_stackptrs(int cpu)
1324{
1325
1326	set_stackptr(PSR_IRQ32_MODE,
1327	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1328	set_stackptr(PSR_ABT32_MODE,
1329	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1330	set_stackptr(PSR_UND32_MODE,
1331	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
1332}
1333#endif
1334
1335#ifdef EFI
1336#define efi_next_descriptor(ptr, size) \
1337	((struct efi_md *)(((uint8_t *) ptr) + size))
1338
1339static void
1340add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr,
1341    int *mrcnt)
1342{
1343	struct efi_md *map, *p;
1344	const char *type;
1345	size_t efisz, memory_size;
1346	int ndesc, i, j;
1347
1348	static const char *types[] = {
1349		"Reserved",
1350		"LoaderCode",
1351		"LoaderData",
1352		"BootServicesCode",
1353		"BootServicesData",
1354		"RuntimeServicesCode",
1355		"RuntimeServicesData",
1356		"ConventionalMemory",
1357		"UnusableMemory",
1358		"ACPIReclaimMemory",
1359		"ACPIMemoryNVS",
1360		"MemoryMappedIO",
1361		"MemoryMappedIOPortSpace",
1362		"PalCode"
1363	};
1364
1365	*mrcnt = 0;
1366
1367	/*
1368	 * Memory map data provided by UEFI via the GetMemoryMap
1369	 * Boot Services API.
1370	 */
1371	efisz = roundup2(sizeof(struct efi_map_header), 0x10);
1372	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
1373
1374	if (efihdr->descriptor_size == 0)
1375		return;
1376	ndesc = efihdr->memory_size / efihdr->descriptor_size;
1377
1378	if (boothowto & RB_VERBOSE)
1379		printf("%23s %12s %12s %8s %4s\n",
1380		    "Type", "Physical", "Virtual", "#Pages", "Attr");
1381
1382	memory_size = 0;
1383	for (i = 0, j = 0, p = map; i < ndesc; i++,
1384	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
1385		if (boothowto & RB_VERBOSE) {
1386			if (p->md_type <= EFI_MD_TYPE_PALCODE)
1387				type = types[p->md_type];
1388			else
1389				type = "<INVALID>";
1390			printf("%23s %012llx %12p %08llx ", type, p->md_phys,
1391			    p->md_virt, p->md_pages);
1392			if (p->md_attr & EFI_MD_ATTR_UC)
1393				printf("UC ");
1394			if (p->md_attr & EFI_MD_ATTR_WC)
1395				printf("WC ");
1396			if (p->md_attr & EFI_MD_ATTR_WT)
1397				printf("WT ");
1398			if (p->md_attr & EFI_MD_ATTR_WB)
1399				printf("WB ");
1400			if (p->md_attr & EFI_MD_ATTR_UCE)
1401				printf("UCE ");
1402			if (p->md_attr & EFI_MD_ATTR_WP)
1403				printf("WP ");
1404			if (p->md_attr & EFI_MD_ATTR_RP)
1405				printf("RP ");
1406			if (p->md_attr & EFI_MD_ATTR_XP)
1407				printf("XP ");
1408			if (p->md_attr & EFI_MD_ATTR_RT)
1409				printf("RUNTIME");
1410			printf("\n");
1411		}
1412
1413		switch (p->md_type) {
1414		case EFI_MD_TYPE_CODE:
1415		case EFI_MD_TYPE_DATA:
1416		case EFI_MD_TYPE_BS_CODE:
1417		case EFI_MD_TYPE_BS_DATA:
1418		case EFI_MD_TYPE_FREE:
1419			/*
1420			 * We're allowed to use any entry with these types.
1421			 */
1422			break;
1423		default:
1424			continue;
1425		}
1426
1427		j++;
1428		if (j >= FDT_MEM_REGIONS)
1429			break;
1430
1431		mr[j].mr_start = p->md_phys;
1432		mr[j].mr_size = p->md_pages * PAGE_SIZE;
1433		memory_size += mr[j].mr_size;
1434	}
1435
1436	*mrcnt = j;
1437}
1438#endif /* EFI */
1439
1440#ifdef FDT
/*
 * Step to the next string in a block of consecutive NUL-terminated
 * strings (the loader's static kenv).  Returns NULL at the terminating
 * empty string, or when handed NULL.
 */
static char *
kenv_next(char *cp)
{
	char *next;

	if (cp == NULL)
		return (NULL);
	/* Skip past the current string and its terminator. */
	next = cp;
	while (*next != '\0')
		next++;
	next++;
	/* An empty string marks the end of the environment block. */
	return (*next != '\0' ? next : NULL);
}
1454
1455static void
1456print_kenv(void)
1457{
1458	char *cp;
1459
1460	debugf("loader passed (static) kenv:\n");
1461	if (loader_envp == NULL) {
1462		debugf(" no env, null ptr\n");
1463		return;
1464	}
1465	debugf(" loader_envp = 0x%08x\n", (uint32_t)loader_envp);
1466
1467	for (cp = loader_envp; cp != NULL; cp = kenv_next(cp))
1468		debugf(" %x %s\n", (uint32_t)cp, cp);
1469}
1470
1471#if __ARM_ARCH < 6
/*
 * Early machine-dependent bootstrap for __ARM_ARCH < 6.
 *
 * Parses loader boot parameters and the FDT, hand-builds the initial
 * L1/L2 page tables, allocates boot-time mode stacks, the per-CPU area
 * and the message buffer, switches to the new translation table, brings
 * up the console and sets up proc0.  Returns the initial stack pointer
 * for thread0 (top of the SVC stack, below the pcb).
 */
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint64_t memsize;
	uint32_t l2size;
	char *env;
	void *kmdp;
	u_int l1pagetable;
	int i, j, err_devmap, mem_regions_sz;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;

	memsize = 0;

	cpuinfo_init();
	set_cpufuncs();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
		panic("Cannot get physical memory regions");
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/* Platform-specific initialisation */
	platform_probe_and_attach();

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

	/* Boot-time allocations are carved off page-aligned past the kernel. */
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	/* Advance freemempos until it is L1_TABLE_SIZE-aligned. */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	/*
	 * L2 tables are allocated a page at a time (each page holds
	 * PAGE_SIZE / L2_TABLE_SIZE_REAL tables); j remembers the index
	 * whose entry owns the current backing page.
	 */
	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;

		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, kstack_pages * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	   (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/* Switch the MMU onto the freshly built L1 table. */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	cpu_setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	platform_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	dbg_monitor_init();
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
1742#else /* __ARM_ARCH < 6 */
/*
 * Early machine-dependent bootstrap for __ARM_ARCH >= 6.
 *
 * Parses loader boot parameters, the FDT and (optionally) the UEFI
 * memory map, lets pmap build and install the kernel page tables,
 * allocates boot-time mode stacks, the per-CPU area and the message
 * buffer, brings up the console and sets up proc0.  Returns the initial
 * (aligned) stack pointer for thread0.
 */
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	char *env;
	void *kmdp;
	int err_devmap, mem_regions_sz;
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

	set_cpufuncs();
	cpuinfo_init();

	/*
	 * Find the dtb passed in by the boot loader.
	 *
	 * NOTE(review): kmdp is not NULL-checked here, unlike the pre-v6
	 * initarm -- confirm the loader path always provides the metadata.
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

#ifdef EFI
	/* Prefer the UEFI memory map over the FDT memory nodes if present. */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,NULL) != 0)
			panic("Cannot get physical memory regions");
	}
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/*
	 * Set TEX remapping registers.
	 * Setup kernel page tables and switch to kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH,  1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;

	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU );
	kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(0, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	/* Set stack for exception handlers */
	undefined_init();
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	pmap_bootstrap(0);

	/* Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
		pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	dbg_monitor_init();
	kdb_init();
	return ((void *)STACKALIGN(thread0.td_pcb));

}
1925
1926#endif /* __ARM_ARCH < 6 */
1927#endif /* FDT */
1928
1929uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *,
1930    struct timecounter *);
1931
1932uint32_t
1933cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
1934{
1935
1936	return (arm_cpu_fill_vdso_timehands != NULL ?
1937	    arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0);
1938}
1939