/* machdep.c revision 322761 */
1/*-
2 * Copyright (c) 2014 Andrew Turner
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include "opt_platform.h"
29#include "opt_ddb.h"
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/machdep.c 322761 2017-08-21 17:35:04Z jhb $");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/buf.h>
37#include <sys/bus.h>
38#include <sys/cons.h>
39#include <sys/cpu.h>
40#include <sys/devmap.h>
41#include <sys/efi.h>
42#include <sys/exec.h>
43#include <sys/imgact.h>
44#include <sys/kdb.h>
45#include <sys/kernel.h>
46#include <sys/limits.h>
47#include <sys/linker.h>
48#include <sys/msgbuf.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
51#include <sys/ptrace.h>
52#include <sys/reboot.h>
53#include <sys/rwlock.h>
54#include <sys/sched.h>
55#include <sys/signalvar.h>
56#include <sys/syscallsubr.h>
57#include <sys/sysent.h>
58#include <sys/sysproto.h>
59#include <sys/ucontext.h>
60#include <sys/vdso.h>
61
62#include <vm/vm.h>
63#include <vm/vm_kern.h>
64#include <vm/vm_object.h>
65#include <vm/vm_page.h>
66#include <vm/pmap.h>
67#include <vm/vm_map.h>
68#include <vm/vm_pager.h>
69
70#include <machine/armreg.h>
71#include <machine/cpu.h>
72#include <machine/debug_monitor.h>
73#include <machine/kdb.h>
74#include <machine/machdep.h>
75#include <machine/metadata.h>
76#include <machine/md_var.h>
77#include <machine/pcb.h>
78#include <machine/reg.h>
79#include <machine/vmparam.h>
80
81#ifdef VFP
82#include <machine/vfp.h>
83#endif
84
85#ifdef FDT
86#include <dev/fdt/fdt_common.h>
87#include <dev/ofw/openfirm.h>
88#endif
89
/* Per-CPU state; slot 0 is the boot CPU (wired up in initarm()). */
struct pcpu __pcpu[MAXCPU];

/* Static trapframe for thread0, installed by init_proc0(). */
static struct trapframe proc0_tf;

/* (start, end) pairs of usable / dumpable physical memory. */
vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

/* Cleared at the end of initarm() once early bootstrap is complete. */
int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

/* Two slots (start, end) per physical segment. */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */
int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
int has_pan;			/* Set by pan_setup() when PAN is present */

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero; cache_setup() may switch it. */
void (*pagezero)(void *p) = pagezero_simple;
120
121static void
122pan_setup(void)
123{
124	uint64_t id_aa64mfr1;
125
126	id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
127	if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
128		has_pan = 1;
129}
130
/*
 * Turn on Privileged Access Never when the CPU supports it, so plain
 * kernel loads/stores to user-accessible memory fault and the kernel
 * must use the dedicated user access routines instead.
 */
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		/* Clear SCTLR_EL1.SPAN before setting PSTATE.PAN. */
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
151
/*
 * Late (SI_SUB_CPU) startup: finish identifying the boot CPU, then set
 * up the kernel VA submaps and the buffer/pager subsystems.  Ordering
 * follows other FreeBSD ports: identify first, VM submaps before the
 * buffer cache.
 */
static void
cpu_startup(void *dummy)
{

	identify_cpu();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
164
/*
 * MD idle-wakeup hook.  Returning 0 indicates no machine-dependent
 * wakeup action was performed for the given CPU.
 */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
171
172int
173fill_regs(struct thread *td, struct reg *regs)
174{
175	struct trapframe *frame;
176
177	frame = td->td_frame;
178	regs->sp = frame->tf_sp;
179	regs->lr = frame->tf_lr;
180	regs->elr = frame->tf_elr;
181	regs->spsr = frame->tf_spsr;
182
183	memcpy(regs->x, frame->tf_x, sizeof(regs->x));
184
185	return (0);
186}
187
188int
189set_regs(struct thread *td, struct reg *regs)
190{
191	struct trapframe *frame;
192
193	frame = td->td_frame;
194	frame->tf_sp = regs->sp;
195	frame->tf_lr = regs->lr;
196	frame->tf_elr = regs->elr;
197	frame->tf_spsr = regs->spsr;
198
199	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
200
201	return (0);
202}
203
/*
 * Export the thread's VFP (FP/SIMD) register state into the struct
 * fpreg layout used by ptrace(2) and core dumps.  On kernels built
 * without VFP, or when the thread has no live FP state, the q-register
 * block is zeroed instead.
 */
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);

		memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpcr;
		regs->fp_sr = pcb->pcb_fpsr;
	} else
#endif
		memset(regs->fp_q, 0, sizeof(regs->fp_q));
	return (0);
}
227
/*
 * Install VFP register state for a thread from the ptrace(2) struct
 * fpreg layout.  A successful no-op on kernels built without VFP.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb = td->td_pcb;

	pcb->pcb_fpsr = regs->fp_sr;
	pcb->pcb_fpcr = regs->fp_cr;
	memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
#endif
	return (0);
}
241
/* Not yet implemented on arm64; callers see EDOOFUS. */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	printf("ARM64TODO: fill_dbregs");
	return (EDOOFUS);
}
249
/* Not yet implemented on arm64; callers see EDOOFUS. */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	printf("ARM64TODO: set_dbregs");
	return (EDOOFUS);
}
257
/* Not yet implemented on arm64; callers see EDOOFUS. */
int
ptrace_set_pc(struct thread *td, u_long addr)
{

	printf("ARM64TODO: ptrace_set_pc");
	return (EDOOFUS);
}
265
266int
267ptrace_single_step(struct thread *td)
268{
269
270	td->td_frame->tf_spsr |= PSR_SS;
271	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
272	return (0);
273}
274
275int
276ptrace_clear_single_step(struct thread *td)
277{
278
279	td->td_frame->tf_spsr &= ~PSR_SS;
280	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
281	return (0);
282}
283
284void
285exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
286{
287	struct trapframe *tf = td->td_frame;
288
289	memset(tf, 0, sizeof(struct trapframe));
290
291	/*
292	 * We need to set x0 for init as it doesn't call
293	 * cpu_set_syscall_retval to copy the value. We also
294	 * need to set td_retval for the cases where we do.
295	 */
296	tf->tf_x[0] = td->td_retval[0] = stack;
297	tf->tf_sp = STACKALIGN(stack);
298	tf->tf_lr = imgp->entry_addr;
299	tf->tf_elr = imgp->entry_addr;
300}
301
/*
 * Sanity check these are the same size, they will be memcpy'd to and fro
 * between the trapframe, mcontext gpregs and ptrace reg layouts.
 */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);
307
/*
 * Capture the general purpose part of the thread's user context into
 * *mcp.  With GET_MC_CLEAR_RET the syscall return register (x0) is
 * zeroed and the carry flag is cleared in the saved PSR.
 * NOTE(review): the carry bit appears to serve as the syscall error
 * indicator here -- confirm against cpu_set_syscall_retval().
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	/* The remaining x registers are always copied verbatim. */
	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;

	return (0);
}
330
331int
332set_mcontext(struct thread *td, mcontext_t *mcp)
333{
334	struct trapframe *tf = td->td_frame;
335
336	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
337
338	tf->tf_sp = mcp->mc_gpregs.gp_sp;
339	tf->tf_lr = mcp->mc_gpregs.gp_lr;
340	tf->tf_elr = mcp->mc_gpregs.gp_elr;
341	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
342
343	return (0);
344}
345
346static void
347get_fpcontext(struct thread *td, mcontext_t *mcp)
348{
349#ifdef VFP
350	struct pcb *curpcb;
351
352	critical_enter();
353
354	curpcb = curthread->td_pcb;
355
356	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
357		/*
358		 * If we have just been running VFP instructions we will
359		 * need to save the state to memcpy it below.
360		 */
361		vfp_save_state(td, curpcb);
362
363		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
364		    sizeof(mcp->mc_fpregs));
365		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
366		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
367		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
368		mcp->mc_flags |= _MC_FP_VALID;
369	}
370
371	critical_exit();
372#endif
373}
374
375static void
376set_fpcontext(struct thread *td, mcontext_t *mcp)
377{
378#ifdef VFP
379	struct pcb *curpcb;
380
381	critical_enter();
382
383	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
384		curpcb = curthread->td_pcb;
385
386		/*
387		 * Discard any vfp state for the current thread, we
388		 * are about to override it.
389		 */
390		vfp_discard(td);
391
392		memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
393		    sizeof(mcp->mc_fpregs));
394		curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
395		curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
396		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
397	}
398
399	critical_exit();
400#endif
401}
402
/*
 * Idle the CPU.  With interrupts disabled via the spinlock section,
 * optionally stop the tick clock, then wait-for-interrupt unless work
 * became runnable.  The "dsb sy" orders prior memory accesses before
 * the wfi so a pending wakeup store is observed.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
418
/*
 * Spin forever in a low power wait; used as the terminal state of a
 * shutdown/halt.  Never returns.
 */
void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
429
430/*
431 * Flush the D-cache for non-DMA I/O so that the I-cache can
432 * be made coherent later.
433 */
434void
435cpu_flush_dcache(void *ptr, size_t len)
436{
437
438	/* ARM64TODO TBD */
439}
440
441/* Get current clock frequency for the given CPU ID. */
442int
443cpu_est_clockrate(int cpu_id, uint64_t *rate)
444{
445	struct pcpu *pc;
446
447	pc = pcpu_find(cpu_id);
448	if (pc == NULL || rate == NULL)
449		return (EINVAL);
450
451	if (pc->pc_clock == 0)
452		return (EOPNOTSUPP);
453
454	*rate = pc->pc_clock;
455	return (0);
456}
457
/*
 * MD portion of per-CPU structure initialization.
 * NOTE(review): 0xffffffff seems to mark the ACPI processor id as
 * not-yet-known, to be filled in by later CPU enumeration -- confirm.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}
464
/*
 * Enter a spinlock section: the outermost entry disables interrupts
 * and records the previous DAIF flags for the matching spinlock_exit();
 * nesting is tracked in md_spinlock_count.  Interrupts must be
 * disabled before the count is published.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
480
/*
 * Leave a spinlock section; the outermost exit restores the DAIF flags
 * saved by the matching spinlock_enter().  The saved flags are read
 * before the count is decremented so a preemption cannot observe an
 * inconsistent pair.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	critical_exit();
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(daif);
}
494
495#ifndef	_SYS_SYSPROTO_H_
496struct sigreturn_args {
497	ucontext_t *ucp;
498};
499#endif
500
501int
502sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
503{
504	ucontext_t uc;
505	uint32_t spsr;
506
507	if (uap == NULL)
508		return (EFAULT);
509	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
510		return (EFAULT);
511
512	spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
513	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
514	    (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
515		return (EINVAL);
516
517	set_mcontext(td, &uc.uc_mcontext);
518	set_fpcontext(td, &uc.uc_mcontext);
519
520	/* Restore signal mask. */
521	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
522
523	return (EJUSTRETURN);
524}
525
526/*
527 * Construct a PCB from a trapframe. This is called from kdb_trap() where
528 * we want to start a backtrace from the function that caused us to enter
529 * the debugger. We have the context in the trapframe, but base the trace
530 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
531 * enough for a backtrace.
532 */
533void
534makectx(struct trapframe *tf, struct pcb *pcb)
535{
536	int i;
537
538	for (i = 0; i < PCB_LR; i++)
539		pcb->pcb_x[i] = tf->tf_x[i];
540
541	pcb->pcb_x[PCB_LR] = tf->tf_lr;
542	pcb->pcb_pc = tf->tf_elr;
543	pcb->pcb_sp = tf->tf_sp;
544}
545
546void
547sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
548{
549	struct thread *td;
550	struct proc *p;
551	struct trapframe *tf;
552	struct sigframe *fp, frame;
553	struct sigacts *psp;
554	struct sysentvec *sysent;
555	int code, onstack, sig;
556
557	td = curthread;
558	p = td->td_proc;
559	PROC_LOCK_ASSERT(p, MA_OWNED);
560
561	sig = ksi->ksi_signo;
562	code = ksi->ksi_code;
563	psp = p->p_sigacts;
564	mtx_assert(&psp->ps_mtx, MA_OWNED);
565
566	tf = td->td_frame;
567	onstack = sigonstack(tf->tf_sp);
568
569	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
570	    catcher, sig);
571
572	/* Allocate and validate space for the signal handler context. */
573	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
574	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
575		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
576		    td->td_sigstk.ss_size);
577#if defined(COMPAT_43)
578		td->td_sigstk.ss_flags |= SS_ONSTACK;
579#endif
580	} else {
581		fp = (struct sigframe *)td->td_frame->tf_sp;
582	}
583
584	/* Make room, keeping the stack aligned */
585	fp--;
586	fp = (struct sigframe *)STACKALIGN(fp);
587
588	/* Fill in the frame to copy out */
589	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
590	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
591	frame.sf_si = ksi->ksi_info;
592	frame.sf_uc.uc_sigmask = *mask;
593	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
594	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
595	frame.sf_uc.uc_stack = td->td_sigstk;
596	mtx_unlock(&psp->ps_mtx);
597	PROC_UNLOCK(td->td_proc);
598
599	/* Copy the sigframe out to the user's stack. */
600	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
601		/* Process has trashed its stack. Kill it. */
602		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
603		PROC_LOCK(p);
604		sigexit(td, SIGILL);
605	}
606
607	tf->tf_x[0]= sig;
608	tf->tf_x[1] = (register_t)&fp->sf_si;
609	tf->tf_x[2] = (register_t)&fp->sf_uc;
610
611	tf->tf_elr = (register_t)catcher;
612	tf->tf_sp = (register_t)fp;
613	sysent = p->p_sysent;
614	if (sysent->sv_sigcode_base != 0)
615		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
616	else
617		tf->tf_lr = (register_t)(sysent->sv_psstrings -
618		    *(sysent->sv_szsigcode));
619
620	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
621	    tf->tf_sp);
622
623	PROC_LOCK(p);
624	mtx_lock(&psp->ps_mtx);
625}
626
/*
 * Wire up proc0/thread0: link the kernel stack, place the PCB just
 * below the kstack address, and point the boot CPU's pcpu at the PCB.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	/* The PCB sits immediately below the address in td_kstack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	/* UINT_MAX: the VFP state is not resident on any CPU yet. */
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
640
/*
 * Layout of a UEFI memory descriptor as specified by the UEFI spec.
 * NOTE(review): this local typedef appears unused in this file -- the
 * EFI map walk below uses struct efi_md -- confirm before removing.
 */
typedef struct {
	uint32_t type;
	uint64_t phys_start;
	uint64_t virt_start;
	uint64_t num_pages;
	uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;
648
/*
 * Insert the physical range [base, base + length) into physmap[], an
 * ordered array of (start, end) pairs, merging with an adjacent entry
 * where possible.  Returns 1 on success -- including the benign cases
 * of a zero length or an overlapping (ignored) region -- and 0 when
 * the map is full.
 */
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	u_int i, insert_idx, _physmap_idx;

	_physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = _physmap_idx;
	for (i = 0; i <= _physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= _physmap_idx &&
	    base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	_physmap_idx += 2;
	*physmap_idxp = _physmap_idx;
	if (_physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = _physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}
713
714#ifdef FDT
715static void
716add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
717    u_int *physmap_idxp)
718{
719
720	for (int i = 0; i < mrcnt; i++) {
721		if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
722		    physmap_idxp))
723			break;
724	}
725}
726#endif
727
/*
 * Walk the UEFI memory map handed over by the loader, optionally
 * printing it (boot -v), and add every usable RAM range to physmap[].
 */
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	/* Human readable names indexed by EFI memory type. */
	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode",
		"PersistentMemory"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	/* Guard against a corrupt header before dividing. */
	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type < nitems(types))
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_NV)
				printf("NV ");
			if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
				printf("MORE_RELIABLE ");
			if (p->md_attr & EFI_MD_ATTR_RO)
				printf("RO ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idxp))
			break;
	}
}
825
826#ifdef FDT
827static void
828try_load_dtb(caddr_t kmdp)
829{
830	vm_offset_t dtbp;
831
832	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
833	if (dtbp == (vm_offset_t)NULL) {
834		printf("ERROR loading DTB\n");
835		return;
836	}
837
838	if (OF_install(OFW_FDT, 0) == FALSE)
839		panic("Cannot install FDT");
840
841	if (OF_init((void *)dtbp) != 0)
842		panic("OF_init failed with the found device tree");
843}
844#endif
845
/*
 * Read CTR_EL0 / DCZID_EL0 to derive the cache maintenance line sizes,
 * and switch pagezero to the cache-assisted implementation when the
 * "dc zva" instruction is usable.  The register fields encode
 * log2(words), hence the sizeof(int) << shift conversions to bytes.
 */
static void
cache_setup(void)
{
	int dcache_line_shift, icache_line_shift, dczva_line_shift;
	uint32_t ctr_el0;
	uint32_t dczid_el0;

	ctr_el0 = READ_SPECIALREG(ctr_el0);

	/* Read the log2 words in each D cache line */
	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
	/* Get the D cache line size */
	dcache_line_size = sizeof(int) << dcache_line_shift;

	/* And the same for the I cache */
	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
	icache_line_size = sizeof(int) << icache_line_shift;

	idcache_line_size = MIN(dcache_line_size, icache_line_size);

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* Same as with above calculations */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
880
/*
 * Machine-dependent bootstrap, called from locore with the parameters
 * assembled by the early assembly code.  Order matters throughout:
 * preload metadata first, then the physical memory map, pcpu/curthread,
 * pmap, console, proc0 and finally the debugger hooks.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	vm_paddr_t mem_len;
	int i;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	/* NOTE(review): kmdp may still be NULL here; MD_FETCH would fault. */
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/* Load the physical memory ranges */
	physmap_idx = 0;
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
		    &physmap_idx);
	}
#endif

	/* Print the memory map */
	mem_len = 0;
	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
		mem_len += physmap[i + 1] - physmap[i];
	}
	/* Terminate dump_avail with a zero pair. */
	dump_avail[i] = 0;
	dump_avail[i + 1] = 0;

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap  to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

	devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
	pan_enable();

	early_boot = 0;
}
979
/*
 * Prepare the self-hosted debug facilities: unlock the OS lock so the
 * debug registers are writable, then let the monitor code claim them.
 */
void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(OSLAR_EL1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}
992
993#ifdef DDB
994#include <ddb/ddb.h>
995
/*
 * DDB "show specialregs": dump the architectural ID, status and
 * control registers of the current CPU.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1070
1071DB_SHOW_COMMAND(vtop, db_show_vtop)
1072{
1073	uint64_t phys;
1074
1075	if (have_addr) {
1076		phys = arm64_address_translate_s1e1r(addr);
1077		db_printf("Physical address reg (read):  0x%016lx\n", phys);
1078		phys = arm64_address_translate_s1e1w(addr);
1079		db_printf("Physical address reg (write): 0x%016lx\n", phys);
1080	} else
1081		db_printf("show vtop <virt_addr>\n");
1082}
1083#endif
1084