machdep.c revision 319202
1/*-
2 * Copyright (c) 2014 Andrew Turner
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include "opt_platform.h"
29#include "opt_ddb.h"
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/machdep.c 319202 2017-05-30 12:26:36Z andrew $");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/buf.h>
37#include <sys/bus.h>
38#include <sys/cons.h>
39#include <sys/cpu.h>
40#include <sys/devmap.h>
41#include <sys/efi.h>
42#include <sys/exec.h>
43#include <sys/imgact.h>
44#include <sys/kdb.h>
45#include <sys/kernel.h>
46#include <sys/limits.h>
47#include <sys/linker.h>
48#include <sys/msgbuf.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
51#include <sys/ptrace.h>
52#include <sys/reboot.h>
53#include <sys/rwlock.h>
54#include <sys/sched.h>
55#include <sys/signalvar.h>
56#include <sys/syscallsubr.h>
57#include <sys/sysent.h>
58#include <sys/sysproto.h>
59#include <sys/ucontext.h>
60#include <sys/vdso.h>
61
62#include <vm/vm.h>
63#include <vm/vm_kern.h>
64#include <vm/vm_object.h>
65#include <vm/vm_page.h>
66#include <vm/pmap.h>
67#include <vm/vm_map.h>
68#include <vm/vm_pager.h>
69
70#include <machine/armreg.h>
71#include <machine/cpu.h>
72#include <machine/debug_monitor.h>
73#include <machine/kdb.h>
74#include <machine/machdep.h>
75#include <machine/metadata.h>
76#include <machine/md_var.h>
77#include <machine/pcb.h>
78#include <machine/reg.h>
79#include <machine/vmparam.h>
80
81#ifdef VFP
82#include <machine/vfp.h>
83#endif
84
85#ifdef FDT
86#include <dev/fdt/fdt_common.h>
87#include <dev/ofw/openfirm.h>
88#endif
89
/* Per-CPU data; slot 0 is set up by initarm() before APs exist. */
struct pcpu __pcpu[MAXCPU];

/* Static trapframe for thread0, linked in by init_proc0(). */
static struct trapframe proc0_tf;

/* Usable / dumpable physical ranges as {start, end} pairs, 0,0 terminated. */
vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;	/* Cleared at the end of initarm(). */
int cold = 1;
long realmem = 0;
long Maxmem = 0;

/* physmap[] holds {start, end} pairs, so two slots per segment. */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */
int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
int has_pan;			/* Non-zero when PAN is supported; see pan_setup() */

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;
120
121static void
122pan_setup(void)
123{
124	uint64_t id_aa64mfr1;
125
126	id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
127	if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
128		has_pan = 1;
129}
130
/*
 * Turn on PAN if pan_setup() found hardware support for it.
 */
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		/*
		 * Clear SCTLR_EL1.SPAN so that PSTATE.PAN is set
		 * automatically on taking an exception to EL1.
		 */
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
151
/*
 * Late machine-dependent startup, run once at SI_SUB_CPU via the
 * SYSINIT below: identify the boot CPU, then finish kernel submap
 * and buffer cache initialization.  Order matters here.
 */
static void
cpu_startup(void *dummy)
{

	identify_cpu();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
164
/*
 * Wake an idle CPU.  Nothing special is required here — the "wfi" in
 * cpu_idle() resumes on any interrupt — so report that no wakeup
 * action was taken by returning 0.
 */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
171
172int
173fill_regs(struct thread *td, struct reg *regs)
174{
175	struct trapframe *frame;
176
177	frame = td->td_frame;
178	regs->sp = frame->tf_sp;
179	regs->lr = frame->tf_lr;
180	regs->elr = frame->tf_elr;
181	regs->spsr = frame->tf_spsr;
182
183	memcpy(regs->x, frame->tf_x, sizeof(regs->x));
184
185	return (0);
186}
187
188int
189set_regs(struct thread *td, struct reg *regs)
190{
191	struct trapframe *frame;
192
193	frame = td->td_frame;
194	frame->tf_sp = regs->sp;
195	frame->tf_lr = regs->lr;
196	frame->tf_elr = regs->elr;
197	frame->tf_spsr = regs->spsr;
198
199	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
200
201	return (0);
202}
203
204int
205fill_fpregs(struct thread *td, struct fpreg *regs)
206{
207#ifdef VFP
208	struct pcb *pcb;
209
210	pcb = td->td_pcb;
211	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
212		/*
213		 * If we have just been running VFP instructions we will
214		 * need to save the state to memcpy it below.
215		 */
216		vfp_save_state(td, pcb);
217
218		memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
219		regs->fp_cr = pcb->pcb_fpcr;
220		regs->fp_sr = pcb->pcb_fpsr;
221	} else
222#endif
223		memset(regs->fp_q, 0, sizeof(regs->fp_q));
224	return (0);
225}
226
/*
 * Install the VFP register set *regs into td's PCB (ptrace(2)).
 * A no-op (other than returning success) on kernels built without VFP.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb = td->td_pcb;

	pcb->pcb_fpcr = regs->fp_cr;
	pcb->pcb_fpsr = regs->fp_sr;
	memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
#endif
	return (0);
}
240
/*
 * Read the hardware debug registers for td.  Not implemented on
 * arm64 yet; reaching this is a bug, hence the panic.
 */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("ARM64TODO: fill_dbregs");
}
247
/*
 * Write the hardware debug registers for td.  Not implemented on
 * arm64 yet; reaching this is a bug, hence the panic.
 */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("ARM64TODO: set_dbregs");
}
254
/*
 * Set the program counter of a traced thread.  Not implemented on
 * arm64 yet; the return is unreachable after the panic.
 */
int
ptrace_set_pc(struct thread *td, u_long addr)
{

	panic("ARM64TODO: ptrace_set_pc");
	return (0);
}
262
/*
 * Arrange for td to stop after executing one instruction: set the
 * software-step bit in the saved PSTATE and flag the PCB so the
 * single-step state is honoured on return to userland (presumably
 * consumed by the debug monitor code — verify in debug_monitor.c).
 */
int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_spsr |= PSR_SS;
	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
	return (0);
}
271
/*
 * Undo ptrace_single_step(): clear the software-step PSTATE bit and
 * the matching PCB flag.
 */
int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_spsr &= ~PSR_SS;
	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
	return (0);
}
280
281void
282exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
283{
284	struct trapframe *tf = td->td_frame;
285
286	memset(tf, 0, sizeof(struct trapframe));
287
288	/*
289	 * We need to set x0 for init as it doesn't call
290	 * cpu_set_syscall_retval to copy the value. We also
291	 * need to set td_retval for the cases where we do.
292	 */
293	tf->tf_x[0] = td->td_retval[0] = stack;
294	tf->tf_sp = STACKALIGN(stack);
295	tf->tf_lr = imgp->entry_addr;
296	tf->tf_elr = imgp->entry_addr;
297}
298
/* Sanity check these are the same size, they will be memcpy'd to and fro */
/* (relied on by fill_regs/set_regs and get_mcontext/set_mcontext) */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);
304
305int
306get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
307{
308	struct trapframe *tf = td->td_frame;
309
310	if (clear_ret & GET_MC_CLEAR_RET) {
311		mcp->mc_gpregs.gp_x[0] = 0;
312		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
313	} else {
314		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
315		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
316	}
317
318	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
319	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
320
321	mcp->mc_gpregs.gp_sp = tf->tf_sp;
322	mcp->mc_gpregs.gp_lr = tf->tf_lr;
323	mcp->mc_gpregs.gp_elr = tf->tf_elr;
324
325	return (0);
326}
327
328int
329set_mcontext(struct thread *td, mcontext_t *mcp)
330{
331	struct trapframe *tf = td->td_frame;
332
333	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
334
335	tf->tf_sp = mcp->mc_gpregs.gp_sp;
336	tf->tf_lr = mcp->mc_gpregs.gp_lr;
337	tf->tf_elr = mcp->mc_gpregs.gp_elr;
338	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
339
340	return (0);
341}
342
343static void
344get_fpcontext(struct thread *td, mcontext_t *mcp)
345{
346#ifdef VFP
347	struct pcb *curpcb;
348
349	critical_enter();
350
351	curpcb = curthread->td_pcb;
352
353	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
354		/*
355		 * If we have just been running VFP instructions we will
356		 * need to save the state to memcpy it below.
357		 */
358		vfp_save_state(td, curpcb);
359
360		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
361		    sizeof(mcp->mc_fpregs));
362		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
363		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
364		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
365		mcp->mc_flags |= _MC_FP_VALID;
366	}
367
368	critical_exit();
369#endif
370}
371
372static void
373set_fpcontext(struct thread *td, mcontext_t *mcp)
374{
375#ifdef VFP
376	struct pcb *curpcb;
377
378	critical_enter();
379
380	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
381		curpcb = curthread->td_pcb;
382
383		/*
384		 * Discard any vfp state for the current thread, we
385		 * are about to override it.
386		 */
387		vfp_discard(td);
388
389		memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
390		    sizeof(mcp->mc_fpregs));
391		curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
392		curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
393		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
394	}
395
396	critical_exit();
397#endif
398}
399
/*
 * Idle the CPU until work arrives.  When nothing is runnable, wait
 * for an interrupt with "wfi"; the preceding "dsb sy" orders prior
 * memory accesses before entering the low-power state.  When not
 * busy, the periodic clock is stopped around the wait.  The spinlock
 * section keeps interrupts masked until spinlock_exit() restores
 * them, at which point any pending interrupt is serviced.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
415
/*
 * Final stop for a CPU that failed to power off: mask interrupts and
 * sleep in "wfi" forever.
 */
void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
426
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD -- intentionally a no-op for now */
}
437
438/* Get current clock frequency for the given CPU ID. */
439int
440cpu_est_clockrate(int cpu_id, uint64_t *rate)
441{
442	struct pcpu *pc;
443
444	pc = pcpu_find(cpu_id);
445	if (pc == NULL || rate == NULL)
446		return (EINVAL);
447
448	if (pc->pc_clock == 0)
449		return (EOPNOTSUPP);
450
451	*rate = pc->pc_clock;
452	return (0);
453}
454
/*
 * Machine-dependent per-CPU data initialization.  The ACPI id is set
 * to an invalid sentinel here; presumably filled in later by ACPI
 * CPU enumeration when present — verify against the ACPI MD code.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}
461
/*
 * Enter a spinlock section.  On the outermost entry, disable
 * interrupts and stash the previous DAIF mask so spinlock_exit() can
 * restore it; nested entries only bump the count.  The critical
 * section prevents preemption while the count is non-zero.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Disable first so the count/daif update is atomic w.r.t. us. */
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
477
/*
 * Leave a spinlock section; on the outermost exit restore the DAIF
 * interrupt mask saved by spinlock_enter().  Note the saved mask is
 * read before the count is dropped, mirroring the enter path.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	critical_exit();
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(daif);
}
491
492#ifndef	_SYS_SYSPROTO_H_
493struct sigreturn_args {
494	ucontext_t *ucp;
495};
496#endif
497
/*
 * sigreturn(2): restore the machine context saved in the user-
 * supplied ucontext when a signal handler returns.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	uint32_t spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/*
	 * Only allow a context that returns to EL0 with none of the
	 * D/A/I/F exception-mask bits set; anything else would let
	 * userland escalate its execution state.
	 */
	spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
		return (EINVAL);

	set_mcontext(td, &uc.uc_mcontext);
	set_fpcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
522
523/*
524 * Construct a PCB from a trapframe. This is called from kdb_trap() where
525 * we want to start a backtrace from the function that caused us to enter
526 * the debugger. We have the context in the trapframe, but base the trace
527 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
528 * enough for a backtrace.
529 */
530void
531makectx(struct trapframe *tf, struct pcb *pcb)
532{
533	int i;
534
535	for (i = 0; i < PCB_LR; i++)
536		pcb->pcb_x[i] = tf->tf_x[i];
537
538	pcb->pcb_x[PCB_LR] = tf->tf_lr;
539	pcb->pcb_pc = tf->tf_elr;
540	pcb->pcb_sp = tf->tf_sp;
541}
542
543void
544sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
545{
546	struct thread *td;
547	struct proc *p;
548	struct trapframe *tf;
549	struct sigframe *fp, frame;
550	struct sigacts *psp;
551	struct sysentvec *sysent;
552	int code, onstack, sig;
553
554	td = curthread;
555	p = td->td_proc;
556	PROC_LOCK_ASSERT(p, MA_OWNED);
557
558	sig = ksi->ksi_signo;
559	code = ksi->ksi_code;
560	psp = p->p_sigacts;
561	mtx_assert(&psp->ps_mtx, MA_OWNED);
562
563	tf = td->td_frame;
564	onstack = sigonstack(tf->tf_sp);
565
566	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
567	    catcher, sig);
568
569	/* Allocate and validate space for the signal handler context. */
570	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
571	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
572		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
573		    td->td_sigstk.ss_size);
574#if defined(COMPAT_43)
575		td->td_sigstk.ss_flags |= SS_ONSTACK;
576#endif
577	} else {
578		fp = (struct sigframe *)td->td_frame->tf_sp;
579	}
580
581	/* Make room, keeping the stack aligned */
582	fp--;
583	fp = (struct sigframe *)STACKALIGN(fp);
584
585	/* Fill in the frame to copy out */
586	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
587	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
588	frame.sf_si = ksi->ksi_info;
589	frame.sf_uc.uc_sigmask = *mask;
590	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
591	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
592	frame.sf_uc.uc_stack = td->td_sigstk;
593	mtx_unlock(&psp->ps_mtx);
594	PROC_UNLOCK(td->td_proc);
595
596	/* Copy the sigframe out to the user's stack. */
597	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
598		/* Process has trashed its stack. Kill it. */
599		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
600		PROC_LOCK(p);
601		sigexit(td, SIGILL);
602	}
603
604	tf->tf_x[0]= sig;
605	tf->tf_x[1] = (register_t)&fp->sf_si;
606	tf->tf_x[2] = (register_t)&fp->sf_uc;
607
608	tf->tf_elr = (register_t)catcher;
609	tf->tf_sp = (register_t)fp;
610	sysent = p->p_sysent;
611	if (sysent->sv_sigcode_base != 0)
612		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
613	else
614		tf->tf_lr = (register_t)(sysent->sv_psstrings -
615		    *(sysent->sv_szsigcode));
616
617	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
618	    tf->tf_sp);
619
620	PROC_LOCK(p);
621	mtx_lock(&psp->ps_mtx);
622}
623
/*
 * Wire up proc0/thread0 during early boot: link the kernel stack,
 * carve the PCB off the top of that stack, and attach the static
 * proc0 trapframe.  pcb_vfpcpu is set to UINT_MAX — presumably the
 * "owned by no CPU" sentinel; verify against the VFP code.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
637
/*
 * Shape of one UEFI memory map descriptor (cf. EFI_MEMORY_DESCRIPTOR
 * in the UEFI specification).  NOTE(review): this typedef appears
 * unused in this file — add_efi_map_entries() walks struct efi_md
 * instead; confirm before removing.
 */
typedef struct {
	uint32_t type;
	uint64_t phys_start;
	uint64_t virt_start;
	uint64_t num_pages;
	uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;
645
/*
 * Insert the physical range [base, base + length) into physmap[], an
 * array of {start, end} pairs kept sorted by address.  Ranges that
 * abut an existing entry are merged; overlapping or empty ranges are
 * silently dropped.  Returns 1 on success (including a harmless
 * drop) and 0 when the map is full and the caller should stop.
 */
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	u_int i, insert_idx, _physmap_idx;

	_physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = _physmap_idx;
	for (i = 0; i <= _physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= _physmap_idx &&
	    base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	/* A new pair is needed; bail out if the array would overflow. */
	_physmap_idx += 2;
	*physmap_idxp = _physmap_idx;
	if (_physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = _physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}
710
711#ifdef FDT
712static void
713add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
714    u_int *physmap_idxp)
715{
716
717	for (int i = 0; i < mrcnt; i++) {
718		if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
719		    physmap_idxp))
720			break;
721	}
722}
723#endif
724
/*
 * Walk the UEFI memory map handed over by the loader, optionally
 * printing it (RB_VERBOSE), and add every usable entry to physmap[].
 */
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	/* Printable names for the EFI memory types, indexed by md_type. */
	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode",
		"PersistentMemory"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	/* Guard against a malformed header (avoids division by zero). */
	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	/* Descriptors may be larger than struct efi_md; step by size. */
	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type < nitems(types))
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_NV)
				printf("NV ");
			if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
				printf("MORE_RELIABLE ");
			if (p->md_attr & EFI_MD_ATTR_RO)
				printf("RO ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idxp))
			break;
	}
}
822
823#ifdef FDT
824static void
825try_load_dtb(caddr_t kmdp)
826{
827	vm_offset_t dtbp;
828
829	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
830	if (dtbp == (vm_offset_t)NULL) {
831		printf("ERROR loading DTB\n");
832		return;
833	}
834
835	if (OF_install(OFW_FDT, 0) == FALSE)
836		panic("Cannot install FDT");
837
838	if (OF_init((void *)dtbp) != 0)
839		panic("OF_init failed with the found device tree");
840}
841#endif
842
843static void
844cache_setup(void)
845{
846	int dcache_line_shift, icache_line_shift, dczva_line_shift;
847	uint32_t ctr_el0;
848	uint32_t dczid_el0;
849
850	ctr_el0 = READ_SPECIALREG(ctr_el0);
851
852	/* Read the log2 words in each D cache line */
853	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
854	/* Get the D cache line size */
855	dcache_line_size = sizeof(int) << dcache_line_shift;
856
857	/* And the same for the I cache */
858	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
859	icache_line_size = sizeof(int) << icache_line_shift;
860
861	idcache_line_size = MIN(dcache_line_size, icache_line_size);
862
863	dczid_el0 = READ_SPECIALREG(dczid_el0);
864
865	/* Check if dc zva is not prohibited */
866	if (dczid_el0 & DCZID_DZP)
867		dczva_line_size = 0;
868	else {
869		/* Same as with above calculations */
870		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
871		dczva_line_size = sizeof(int) << dczva_line_shift;
872
873		/* Change pagezero function */
874		pagezero = pagezero_cache;
875	}
876}
877
/*
 * arm64 machine-dependent early initialization, entered from the
 * locore assembly with the parameters it gathered.  The sequence is
 * order-critical: module metadata, then console/physmap discovery,
 * then pcpu/pmap bootstrap, then proc0 and the debug facilities.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	vm_paddr_t mem_len;
	int i;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/* Load the physical memory ranges, preferring the EFI map to FDT. */
	physmap_idx = 0;
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
		    &physmap_idx);
	}
#endif

	/* Copy the physmap into dump_avail, terminated by a 0,0 pair. */
	mem_len = 0;
	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
		mem_len += physmap[i + 1] - physmap[i];
	}
	dump_avail[i] = 0;
	dump_avail[i + 1] = 0;

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap  to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

	devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_monitor_init();
	kdb_init();
	pan_enable();

	early_boot = 0;
}
976
977#ifdef DDB
978#include <ddb/ddb.h>
979
/*
 * DDB "show specialregs": dump the EL0/EL1-visible system, ID and
 * control registers.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1054
1055DB_SHOW_COMMAND(vtop, db_show_vtop)
1056{
1057	uint64_t phys;
1058
1059	if (have_addr) {
1060		phys = arm64_address_translate_s1e1r(addr);
1061		db_printf("Physical address reg (read):  0x%016lx\n", phys);
1062		phys = arm64_address_translate_s1e1w(addr);
1063		db_printf("Physical address reg (write): 0x%016lx\n", phys);
1064	} else
1065		db_printf("show vtop <virt_addr>\n");
1066}
1067#endif
1068