/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated	: 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_timer.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/machdep.c 294740 2016-01-25 18:02:28Z zbb $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/acle-compat.h>
#include <machine/armreg.h>
#include <machine/atags.h>
#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/debug_monitor.h>
#include <machine/db_machdep.h>
#include <machine/devmap.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/platform.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>

#if __ARM_ARCH >= 6
#include <machine/cpu-v6.h>

DB_SHOW_COMMAND(cp15, db_show_cp15)
{
	u_int reg;

	reg = cp15_midr_get();
	db_printf("Cpu ID: 0x%08x\n", reg);
	reg = cp15_ctr_get();
	db_printf("Cache Type: 0x%08x\n", reg);

	reg = cp15_sctlr_get();
	db_printf("Ctrl: 0x%08x\n", reg);
	reg = cp15_actlr_get();
	db_printf("Aux Ctrl: 0x%08x\n", reg);

	reg = cp15_id_pfr0_get();
	db_printf("Processor Feat 0: 0x%08x\n", reg);
	reg = cp15_id_pfr1_get();
	db_printf("Processor Feat 1: 0x%08x\n", reg);
	reg = cp15_id_dfr0_get();
	db_printf("Debug Feat 0: 0x%08x\n", reg);
	reg = cp15_id_afr0_get();
	db_printf("Auxiliary Feat 0: 0x%08x\n", reg);
	reg = cp15_id_mmfr0_get();
	db_printf("Memory Model Feat 0: 0x%08x\n", reg);
	reg = cp15_id_mmfr1_get();
	db_printf("Memory Model Feat 1: 0x%08x\n", reg);
	reg = cp15_id_mmfr2_get();
	db_printf("Memory Model Feat 2: 0x%08x\n", reg);
	reg = cp15_id_mmfr3_get();
	db_printf("Memory Model Feat 3: 0x%08x\n", reg);
	reg = cp15_ttbr_get();
	db_printf("TTB0: 0x%08x\n", reg);
}

DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	u_int reg;

	if (have_addr) {
		cp15_ats1cpr_set(addr);
		reg = cp15_par_get();
		db_printf("Physical address reg: 0x%08x\n", reg);
	} else
		db_printf("show vtop <virt_addr>\n");
}
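
/*
 * Usage from the ddb prompt:
 *	show cp15		- dump the CP15 ID and control registers
 *	show vtop 0xc0001000	- translate a virtual to a physical address
 *
 * "show vtop" issues a stage-1 privileged-read translation (ATS1CPR) and
 * prints the resulting PAR; bit 0 set in the PAR means the translation
 * aborted, otherwise the upper bits hold the physical address.
 */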
#endif /* __ARM_ARCH >= 6 */
#endif /* DDB */

#ifdef DEBUG
#define	debugf(fmt, args...) printf(fmt, ##args)
#else
#define	debugf(fmt, args...)
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;

#ifdef FDT
static char *loader_envp;

vm_paddr_t pmap_pa;

#ifdef ARM_NEW_PMAP
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#else
/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX	78

static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;
#endif
#endif

#if defined(LINUX_BOOT_ABI)
#define LBABI_MAX_BANKS	10

uint32_t board_id;
struct arm_lbabi_tag *atag_list;
char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
char atags[LBABI_MAX_COMMAND_LINE * 2];
uint32_t memstart[LBABI_MAX_BANKS];
uint32_t memsize[LBABI_MAX_BANKS];
uint32_t membanks;
#endif

static uint32_t board_revision;
/* hex representation of uint64_t */
static char board_serial[32];

SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
    &board_revision, 0, "Board revision");
SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
    board_serial, 0, "Board serial");

int vfp_exists;
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &vfp_exists, 0, "Floating point support enabled");

void
board_set_serial(uint64_t serial)
{

	snprintf(board_serial, sizeof(board_serial) - 1,
	    "%016jx", serial);
}

void
board_set_revision(uint32_t revision)
{

	board_revision = revision;
}

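/*
 * Deliver a signal: build a struct sigframe (siginfo + ucontext) on the
 * user stack, or on the alternate signal stack when one is requested, and
 * rewrite the trapframe so that the process resumes in the handler with
 * r0 = signal number, r1 = &sf_si, r2 = &sf_uc, and lr pointing at the
 * signal trampoline, which eventually calls sigreturn(2).
 */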
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	struct sysentvec *sysent;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_usr_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));
	/* Set the mode the signal handler will be entered in. */
#if __ARM_ARCH >= 7
	if ((register_t)catcher & 1)
		tf->tf_spsr |= PSR_T;
	else
		tf->tf_spsr &= ~PSR_T;
#endif

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
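
/*
 * Typical usage, as done from initarm() below:
 *
 *	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 */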

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (unsigned int *)va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}

static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#ifdef ARM_TP_ADDRESS
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif
#endif

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory  = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
	    (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
	if (bootverbose) {
		arm_physmem_print_tables();
		arm_devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
#ifndef ARM_NEW_PMAP
	vector_page_setprot(VM_PROT_READ);
	pmap_postinit();
#endif
#ifdef ARM_TP_ADDRESS
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	*(uint32_t *)ARM_RAS_START = 0;
	*(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	cpu_dcache_wb_range((uintptr_t)ptr, len);
#ifdef ARM_L2_PIPT
	cpu_l2cache_wb_range((uintptr_t)vtophys(ptr), len);
#else
	cpu_l2cache_wb_range((uintptr_t)ptr, len);
#endif
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

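/*
 * Idle the CPU: unless the scheduler has runnable work, execute a WFI via
 * cpu_sleep().  When event timers are in use, cpu_idleclock() and
 * cpu_activeclock() bracket the sleep so the periodic tick can be stopped
 * while the core is idle and resumed on wakeup.
 */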
void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_idleclock();
#endif
	if (!sched_runnable())
		cpu_sleep(0);
#ifndef NO_EVENTTIMERS
	if (!busy)
		cpu_activeclock();
#endif
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

/*
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not defining
 * a cpu_initclocks() function they get this generic one.  Any platform that
 * needs to do something special can just provide its own implementation,
 * which will override this one due to the weak linkage.
 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
#endif
}
__weak_reference(arm_generic_initclocks, cpu_initclocks);

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{

	bzero(regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	return (0);
}

static int
ptrace_read_int(struct thread *td, vm_offset_t addr, uint32_t *v)
{

	if (proc_readmem(td, td->td_proc, addr, v, sizeof(*v)) != sizeof(*v))
		return (ENOMEM);
	return (0);
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, uint32_t v)
{

	if (proc_writemem(td, td->td_proc, addr, &v, sizeof(v)) != sizeof(v))
		return (ENOMEM);
	return (0);
}

static u_int
ptrace_get_usr_reg(void *cookie, int reg)
{
	int ret;
	struct thread *td = cookie;

	KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)),
	    ("reg is outside range"));

	switch (reg) {
	case ARM_REG_NUM_PC:
		ret = td->td_frame->tf_pc;
		break;
	case ARM_REG_NUM_LR:
		ret = td->td_frame->tf_usr_lr;
		break;
	case ARM_REG_NUM_SP:
		ret = td->td_frame->tf_usr_sp;
		break;
	default:
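		/* r0-r12 are contiguous in the trapframe; index directly. */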
		ret = *((register_t *)&td->td_frame->tf_r0 + reg);
		break;
	}

	return (ret);
}

static u_int
ptrace_get_usr_int(void *cookie, vm_offset_t offset, u_int *val)
{
	struct thread *td = cookie;
	u_int error;

	error = ptrace_read_int(td, offset, val);

	return (error);
}

/**
 * Parse the current instruction opcode and decode any possible jump
 * (change in PC) which might occur after the instruction is executed.
 *
 * @param     td                Thread structure of the analysed task
 * @param     cur_instr         Currently executed instruction
 * @param     alt_next_address  Pointer to the variable where
 *                              the destination address of the
 *                              jump instruction shall be stored.
 *
 * @return    0                 when a jump is possible
 *            EINVAL            otherwise
 */
static int
ptrace_get_alternative_next(struct thread *td, uint32_t cur_instr,
    uint32_t *alt_next_address)
{
	int error;

	if (inst_branch(cur_instr) || inst_call(cur_instr) ||
	    inst_return(cur_instr)) {
		error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc,
		    alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int);

		return (error);
	}

	return (EINVAL);
}

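/*
 * Software single-step: read the instruction at the current PC, then plant
 * PTRACE_BREAKPOINT both at the fall-through address (PC + 4) and, when the
 * current instruction may branch, at the predicted branch target.  The
 * original instructions are stashed in the thread's MD area so that
 * ptrace_clear_single_step() can restore them afterwards.
 */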
int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error, error_alt;
	uint32_t cur_instr, alt_next = 0;

	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
		return (EINVAL);

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	KASSERT(td->td_md.md_ptrace_instr_alt == 0,
	    ("Didn't clear alternative single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);

	error = ptrace_read_int(td, td->td_frame->tf_pc,
	    &cur_instr);
	if (error)
		goto out;

	error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE,
	    &td->td_md.md_ptrace_instr);
	if (error == 0) {
		error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE,
		    PTRACE_BREAKPOINT);
		if (error) {
			td->td_md.md_ptrace_instr = 0;
		} else {
			td->td_md.md_ptrace_addr = td->td_frame->tf_pc +
			    INSN_SIZE;
		}
	}

	error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next);
	if (error_alt == 0) {
		error_alt = ptrace_read_int(td, alt_next,
		    &td->td_md.md_ptrace_instr_alt);
		if (error_alt) {
			td->td_md.md_ptrace_instr_alt = 0;
		} else {
			error_alt = ptrace_write_int(td, alt_next,
			    PTRACE_BREAKPOINT);
			if (error_alt)
				td->td_md.md_ptrace_instr_alt = 0;
			else
				td->td_md.md_ptrace_addr_alt = alt_next;
		}
	}

out:
	PROC_LOCK(p);
	return ((error != 0) && (error_alt != 0));
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	/* TODO: This needs to be updated for Thumb-2 */
	if ((td->td_frame->tf_spsr & PSR_T) != 0)
		return (EINVAL);

	if (td->td_md.md_ptrace_instr != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}

	if (td->td_md.md_ptrace_instr_alt != 0) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr_alt,
		    td->td_md.md_ptrace_instr_alt);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr_alt = 0;
	}

	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{

	td->td_frame->tf_pc = addr;
	return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}

/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
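	/* Poison value: makes a stray SVC-mode LR easy to spot in a trace. */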
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

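	/*
	 * When asked to clear the return values (getcontext(2) style),
	 * make the saved context look like a successful syscall return:
	 * r0 = 0 and the PSR carry bit, which flags syscall errors on
	 * FreeBSD/arm, cleared.
	 */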
	if (clear_ret & GET_MC_CLEAR_RET) {
		gr[_REG_R0] = 0;
		gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
	} else {
		gr[_REG_R0]   = tf->tf_r0;
		gr[_REG_CPSR] = tf->tf_spsr;
	}
	gr[_REG_R1]   = tf->tf_r1;
	gr[_REG_R2]   = tf->tf_r2;
	gr[_REG_R3]   = tf->tf_r3;
	gr[_REG_R4]   = tf->tf_r4;
	gr[_REG_R5]   = tf->tf_r5;
	gr[_REG_R6]   = tf->tf_r6;
	gr[_REG_R7]   = tf->tf_r7;
	gr[_REG_R8]   = tf->tf_r8;
	gr[_REG_R9]   = tf->tf_r9;
	gr[_REG_R10]  = tf->tf_r10;
	gr[_REG_R11]  = tf->tf_r11;
	gr[_REG_R12]  = tf->tf_r12;
	gr[_REG_SP]   = tf->tf_usr_sp;
	gr[_REG_LR]   = tf->tf_usr_lr;
	gr[_REG_PC]   = tf->tf_pc;

	return (0);
}

/*
 * Set machine context.
 *
 * The register set is installed as supplied; callers such as
 * sys_sigreturn() are responsible for validating the CPSR (processor
 * mode and interrupt masks) before letting untrusted values through.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args /* {
	const struct __ucontext *sigcntxp;
} */ *uap)
{
	ucontext_t uc;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (PSR_I | PSR_F)) != 0)
		return (EINVAL);

	/* Restore register context. */
	set_mcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}


/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_regs.sf_r4 = tf->tf_r4;
	pcb->pcb_regs.sf_r5 = tf->tf_r5;
	pcb->pcb_regs.sf_r6 = tf->tf_r6;
	pcb->pcb_regs.sf_r7 = tf->tf_r7;
	pcb->pcb_regs.sf_r8 = tf->tf_r8;
	pcb->pcb_regs.sf_r9 = tf->tf_r9;
	pcb->pcb_regs.sf_r10 = tf->tf_r10;
	pcb->pcb_regs.sf_r11 = tf->tf_r11;
	pcb->pcb_regs.sf_r12 = tf->tf_r12;
	pcb->pcb_regs.sf_pc = tf->tf_pc;
	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}

/*
 * Fake up a boot descriptor table
 */
vm_offset_t
fake_preload_metadata(struct arm_boot_params *abp __unused)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

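	/*
	 * Preload metadata is a packed list of (type, size, payload)
	 * records; the two zero words stored at the end below terminate
	 * the list.
	 */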
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char *)&fake_preload[i++], "kernel");
	i += 1;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	init_static_kenv(NULL, 0);

	return (lastaddr);
}

void
pcpu0_init(void)
{
#if __ARM_ARCH >= 6
	set_curthread(&thread0);
#endif
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);
}

#if defined(LINUX_BOOT_ABI)
vm_offset_t
linux_parse_boot_param(struct arm_boot_params *abp)
{
	struct arm_lbabi_tag *walker;
	uint32_t revision;
	uint64_t serial;

	/*
	 * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
	 * is the atags or dtb pointer.  If any of these checks fail,
	 * punt.
	 */
	if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0))
		return (0);

	board_id = abp->abp_r1;
	walker = (struct arm_lbabi_tag *)
	    (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr);

	/* xxx - Need to also look for binary device tree */
	if (ATAG_TAG(walker) != ATAG_CORE)
		return (0);

	atag_list = walker;
	while (ATAG_TAG(walker) != ATAG_NONE) {
		switch (ATAG_TAG(walker)) {
		case ATAG_CORE:
			break;
		case ATAG_MEM:
			arm_physmem_hardware_region(walker->u.tag_mem.start,
			    walker->u.tag_mem.size);
			break;
		case ATAG_INITRD2:
			break;
		case ATAG_SERIAL:
			serial = walker->u.tag_sn.low |
			    ((uint64_t)walker->u.tag_sn.high << 32);
			board_set_serial(serial);
			break;
		case ATAG_REVISION:
			revision = walker->u.tag_rev.rev;
			board_set_revision(revision);
			break;
		case ATAG_CMDLINE:
			/* XXX open question: Parse this for boothowto? */
			bcopy(walker->u.tag_cmd.command, linux_command_line,
			    ATAG_SIZE(walker));
			break;
		default:
			break;
		}
		walker = ATAG_NEXT(walker);
	}

	/* Save a copy for later */
	bcopy(atag_list, atags,
	    (char *)walker - (char *)atag_list + ATAG_SIZE(walker));

	init_static_kenv(NULL, 0);

	return (fake_preload_metadata(abp));
}
#endif

#if defined(FREEBSD_BOOT_LOADER)
vm_offset_t
freebsd_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr = 0;
	void *mdp;
	void *kmdp;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	/*
	 * Mask the metadata pointer: it is supposed to be on a page
	 * boundary.  If the first argument (mdp) doesn't point to a valid
	 * address, the boot loader must have passed us something other
	 * than the metadata pointer, so we give up.  Also give up if we
	 * cannot find the metadata section the loader creates, from which
	 * we get all this data.
	 */

	if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
		return (0);
	preload_metadata = mdp;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return (0);

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	init_static_kenv(loader_envp, 0);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif
	return (lastaddr);
}
#endif

vm_offset_t
default_parse_boot_param(struct arm_boot_params *abp)
{
	vm_offset_t lastaddr;

#if defined(LINUX_BOOT_ABI)
	if ((lastaddr = linux_parse_boot_param(abp)) != 0)
		return (lastaddr);
#endif
#if defined(FREEBSD_BOOT_LOADER)
	if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
		return (lastaddr);
#endif
	/* Fall back to hardcoded metadata. */
	lastaddr = fake_preload_metadata(abp);

	return (lastaddr);
}

/*
 * Stub version of the boot parameter parsing routine.  We are
 * called early in initarm, before even VM has been initialized.
 * This routine needs to preserve any data that the boot loader
 * has passed in before the kernel starts to grow past the end
 * of the BSS, traditionally the place boot loaders put this data.
 *
 * Since this is called so early, before the things that depend on the
 * VM system are set up (including access to some SoCs' serial ports),
 * about all that can be done in this routine is to copy the arguments.
 *
 * This is the default boot parameter parsing routine.  Individual
 * kernels/boards can override this weak function with one of their
 * own.  We just fake metadata...
 */
__weak_reference(default_parse_boot_param, parse_boot_param);

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)
		(thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

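/*
 * Given a 32-bit ARM (not Thumb) instruction, compute the address the PC
 * may be transferred to once the instruction executes.  Register contents
 * and memory words are obtained through the fetch_reg/read_int callbacks,
 * so the same decoder works on a live trapframe or on a traced process's
 * memory.  Handled forms: data-processing writes to pc, b/bl, ldr pc and
 * ldm with pc in the register list.
 */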
int
arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc,
    u_int (*fetch_reg)(void *, int),
    u_int (*read_int)(void *, vm_offset_t, u_int *))
{
	u_int addr, nregs, offset = 0;
	int error = 0;

	switch ((insn >> 24) & 0xf) {
	case 0x2:	/* add pc, reg1, #value */
	case 0x0:	/* add pc, reg1, reg2, lsl #offset */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		if (((insn >> 16) & 0xf) == 15)
			addr += 8;
		if (insn & 0x0200000) {
			offset = (insn >> 7) & 0x1e;
			offset = (insn & 0xff) << (32 - offset) |
			    (insn & 0xff) >> offset;
		} else {
			offset = fetch_reg(cookie, insn & 0x0f);
			if ((insn & 0x0000ff0) != 0x00000000) {
				if (insn & 0x10)
					nregs = fetch_reg(cookie,
					    (insn >> 8) & 0xf);
				else
					nregs = (insn >> 7) & 0x1f;
				switch ((insn >> 5) & 3) {
				case 0:
					/* lsl */
					offset = offset << nregs;
					break;
				case 1:
					/* lsr */
					offset = offset >> nregs;
					break;
				default:
					break; /* XXX */
				}
			}
		}
		*new_pc = addr + offset;
		return (0);
	case 0xa:	/* b ... */
	case 0xb:	/* bl ... */
		addr = ((insn << 2) & 0x03ffffff);
		if (addr & 0x02000000)
			addr |= 0xfc000000;
		*new_pc = (pc + 8 + addr);
		return (0);
	case 0x7:	/* ldr pc, [pc, reg, lsl #2] */
		addr = fetch_reg(cookie, insn & 0xf);
		addr = pc + 8 + (addr << 2);
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;
		return (error);
	case 0x1:	/* mov pc, reg */
		*new_pc = fetch_reg(cookie, insn & 0xf);
		return (0);
	case 0x4:
	case 0x5:	/* ldr pc, [reg] */
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
		/* ldr pc, [reg, #offset] */
		if (insn & (1 << 24))
			offset = insn & 0xfff;
		if (insn & 0x00800000)
			addr += offset;
		else
			addr -= offset;
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;
		return (error);
	case 0x8:	/* ldmxx reg, {..., pc} */
	case 0x9:
		addr = fetch_reg(cookie, (insn >> 16) & 0xf);
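		/* Popcount of bits 0-15: number of registers in the list. */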
		nregs = (insn  & 0x5555) + ((insn  >> 1) & 0x5555);
		nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333);
		nregs = (nregs + (nregs >> 4)) & 0x0f0f;
		nregs = (nregs + (nregs >> 8)) & 0x001f;
		switch ((insn >> 23) & 0x3) {
		case 0x0:	/* ldmda */
			addr = addr - 0;
			break;
		case 0x1:	/* ldmia */
			addr = addr + 0 + ((nregs - 1) << 2);
			break;
		case 0x2:	/* ldmdb */
			addr = addr - 4;
			break;
		case 0x3:	/* ldmib */
			addr = addr + 4 + ((nregs - 1) << 2);
			break;
		}
		error = read_int(cookie, addr, &addr);
		*new_pc = addr;
		return (error);
	default:
		return (EINVAL);
	}
}

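/*
 * Point the banked r13 (SP) of the IRQ, abort and undefined processor
 * modes at the per-CPU exception stacks.  Each mode's region holds MAXCPU
 * stacks back to back; since ARM stacks grow downwards, CPU "cpu" gets the
 * top of its slot, i.e. base + size * (cpu + 1).
 */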
#ifdef ARM_NEW_PMAP
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#else
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#endif

#ifdef EFI
#define efi_next_descriptor(ptr, size) \
	((struct efi_md *)(((uint8_t *)ptr) + size))
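
/*
 * The UEFI firmware may use a descriptor stride larger than
 * sizeof(struct efi_md), so the memory map must be walked using the
 * descriptor_size reported in the map header, not plain pointer
 * arithmetic.
 */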

static void
add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr,
    int *mrcnt, uint32_t *memsize)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz, memory_size;
	int ndesc, i, j;

	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode"
	};

	*mrcnt = 0;
	*memsize = 0;

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = roundup2(sizeof(struct efi_map_header), 0x10);
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	memory_size = 0;
	for (i = 0, j = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012llx %12p %08llx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (j >= FDT_MEM_REGIONS)
			break;

		mr[j].mr_start = p->md_phys;
		mr[j].mr_size = p->md_pages * PAGE_SIZE;
		memory_size += mr[j].mr_size;
		j++;
	}

	*mrcnt = j;
	*memsize = memory_size;
}
#endif /* EFI */

#ifdef FDT
1421kenv_next(char *cp)
1422{
1423
1424	if (cp != NULL) {
1425		while (*cp != 0)
1426			cp++;
1427		cp++;
1428		if (*cp == 0)
1429			cp = NULL;
1430	}
1431	return (cp);
1432}
1433
1434static void
1435print_kenv(void)
1436{
1437	char *cp;
1438
1439	debugf("loader passed (static) kenv:\n");
1440	if (loader_envp == NULL) {
1441		debugf(" no env, null ptr\n");
1442		return;
1443	}
1444	debugf(" loader_envp = 0x%08x\n", (uint32_t)loader_envp);
1445
1446	for (cp = loader_envp; cp != NULL; cp = kenv_next(cp))
1447		debugf(" %x %s\n", (uint32_t)cp, cp);
1448}
1449
1450#ifndef ARM_NEW_PMAP
1451void *
1452initarm(struct arm_boot_params *abp)
1453{
1454	struct mem_region mem_regions[FDT_MEM_REGIONS];
1455	struct pv_addr kernel_l1pt;
1456	struct pv_addr dpcpu;
1457	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
1458	uint32_t memsize, l2size;
1459	char *env;
1460	void *kmdp;
1461	u_int l1pagetable;
1462	int i, j, err_devmap, mem_regions_sz;
1463
1464	lastaddr = parse_boot_param(abp);
1465	arm_physmem_kernaddr = abp->abp_physaddr;
1466
1467	memsize = 0;
1468
1469	cpuinfo_init();
1470	set_cpufuncs();
1471
1472	/*
1473	 * Find the dtb passed in by the boot loader.
1474	 */
1475	kmdp = preload_search_by_type("elf kernel");
1476	if (kmdp != NULL)
1477		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
1478	else
1479		dtbp = (vm_offset_t)NULL;
1480
1481#if defined(FDT_DTB_STATIC)
1482	/*
1483	 * In case the device tree blob was not retrieved (from metadata) try
1484	 * to use the statically embedded one.
1485	 */
1486	if (dtbp == (vm_offset_t)NULL)
1487		dtbp = (vm_offset_t)&fdt_static_dtb;
1488#endif
1489
1490	if (OF_install(OFW_FDT, 0) == FALSE)
1491		panic("Cannot install FDT");
1492
1493	if (OF_init((void *)dtbp) != 0)
1494		panic("OF_init failed with the found device tree");
1495
1496	/* Grab physical memory regions information from device tree. */
1497	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
1498		panic("Cannot get physical memory regions");
1499	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);
1500
1501	/* Grab reserved memory regions information from device tree. */
1502	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
1503		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
1504		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
1505
1506	/* Platform-specific initialisation */
1507	platform_probe_and_attach();
1508
1509	pcpu0_init();
1510
1511	/* Do basic tuning, hz etc */
1512	init_param1();
1513
1514	/* Calculate number of L2 tables needed for mapping vm_page_array */
1515	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
1516	l2size = (l2size >> L1_S_SHIFT) + 1;
1517
	/*
	 * Add one table for the end of the kernel map, one for the stacks,
	 * msgbuf and L1/L2 table mappings, and one for the vector map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
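
	/*
	 * alloc_pages() carves zeroed pages out of freemempos;
	 * valloc_pages() additionally records the physical address, using
	 * the fixed physical/virtual offset in effect during bootstrap.
	 * The loop below first aligns freemempos so the L1 translation
	 * table is 16 KB (L1_TABLE_SIZE) aligned, as the MMU requires.
	 */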
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

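	/*
	 * Hardware L2 tables are 1 KB (L2_TABLE_SIZE_REAL), so four of them
	 * share each allocated page: only every fourth table triggers an
	 * allocation, and the other three are carved out of the same page.
	 */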
	for (i = 0, j = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    abp->abp_physaddr;
		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
	valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
	valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
	valloc_pages(kernelstack, kstack_pages * MAXCPU);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(l1pagetable, NULL);
	vm_max_kernel_address = platform_lastaddr();

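	/* Switch to the new L1 translation table and flush stale TLB entries. */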
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	platform_gpio_init();

	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	dbg_monitor_init();
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
#else /* !ARM_NEW_PMAP */
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	uint32_t memsize;
	char *env;
	void *kmdp;
	int err_devmap, mem_regions_sz;
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

	memsize = 0;
	set_cpufuncs();
	cpuinfo_init();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

#ifdef EFI
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz,
		    &memsize);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    &memsize) != 0)
			panic("Cannot get physical memory regions");
	}
	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/*
	 * Set TEX remapping registers.
	 * Setup kernel page tables and switch to kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;

	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
	kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	arm_devmap_bootstrap(0, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	/* Set stack for exception handlers */
	undefined_init();
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	pmap_bootstrap(0);

	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but
	 * not from crash dumps.  virtual_avail is a global variable which
	 * tracks the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_exclude_region(abp->abp_physaddr,
	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	dbg_monitor_init();
	kdb_init();
	return ((void *)STACKALIGN(thread0.td_pcb));
}

#endif /* !ARM_NEW_PMAP */
#endif /* FDT */

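/*
 * Timer drivers that can expose their counter to userland set this hook;
 * cpu_fill_vdso_timehands() then lets the VDSO/shared-page timekeeping
 * code read the hardware directly, while a zero return makes userland
 * fall back to system calls.
 */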
uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *,
    struct timecounter *);

uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	return (arm_cpu_fill_vdso_timehands != NULL ?
	    arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0);
}