machdep.c revision 306085
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/machdep.c 306085 2016-09-21 09:45:14Z andrew $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

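/*
 * physmap[] holds (start, end) physical address pairs built by
 * add_physmap_entry() below; physmap_idx counts the array slots in use,
 * i.e. twice the number of regions.  PHYSMAP_SIZE reserves two slots
 * for each physical segment the VM layer can track.
 */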
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */
int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;

static void
cpu_startup(void *dummy)
{

	identify_cpu();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sp = frame->tf_sp;
	regs->lr = frame->tf_lr;
	regs->elr = frame->tf_elr;
	regs->spsr = frame->tf_spsr;

	memcpy(regs->x, frame->tf_x, sizeof(regs->x));

	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sp = regs->sp;
	frame->tf_lr = regs->lr;
	frame->tf_elr = regs->elr;
	frame->tf_spsr = regs->spsr;

	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions, we need
		 * to save the state so we can copy it below.
		 */
		vfp_save_state(td, pcb);

		memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpcr;
		regs->fp_sr = pcb->pcb_fpsr;
	} else
#endif
		memset(regs->fp_q, 0, sizeof(regs->fp_q));
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpcr = regs->fp_cr;
	pcb->pcb_fpsr = regs->fp_sr;
#endif
	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("ARM64TODO: fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("ARM64TODO: set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	panic("ARM64TODO: ptrace_set_pc");
	return (0);
}

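/*
 * Hardware single step: PSR_SS in the saved SPSR arms the architectural
 * software step state machine for the next instruction executed in
 * userland, and PCB_SINGLE_STEP marks the thread so the kernel debug
 * support keeps the step machinery (presumably MDSCR_EL1.SS) enabled
 * while the thread runs.
 */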
int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_spsr |= PSR_SS;
	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_spsr &= ~PSR_SS;
	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
	return (0);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(struct trapframe));

	/*
	 * We need to set x0 for init as it doesn't call
	 * cpu_set_syscall_retval to copy the value; we also
	 * need to set td_retval for the cases where we do call it.
	 */
	tf->tf_x[0] = td->td_retval[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size; they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

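	/*
	 * GET_MC_CLEAR_RET is used when the context is being built for a
	 * signal delivered during a system call: x0, the return value
	 * register, is zeroed and PSR_C is cleared, as the carry flag is
	 * how the arm64 syscall return path reports an error to userland.
	 */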
	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;

	return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

	return (0);
}

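/*
 * The fpcontext helpers run inside a critical section so a context
 * switch cannot save or reload the VFP registers while the state is
 * being copied between the PCB and the mcontext.
 */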
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	critical_enter();

	curpcb = curthread->td_pcb;

	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions, we need
		 * to save the state so we can copy it below.
		 */
		vfp_save_state(td, curpcb);

		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
		    sizeof(mcp->mc_fpregs.fp_q));
		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
		mcp->mc_flags |= _MC_FP_VALID;
	}

	critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	critical_enter();

	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;

		/*
		 * Discard any vfp state for the current thread; we
		 * are about to override it.
		 */
		vfp_discard(td);

		memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
		    sizeof(mcp->mc_fpregs.fp_q));
		curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
		curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
	}

	critical_exit();
#endif
}

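/*
 * Idle by waiting for an interrupt.  The "dsb sy" barrier ensures all
 * prior memory accesses have completed before "wfi" suspends the core;
 * any enabled interrupt then wakes it again.
 */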
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}

void
cpu_halt(void)
{

	/* We should have shut down by now; if not, enter a low-power sleep */
	intr_disable();
	while (1) {
		__asm __volatile("wfi");
	}
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	struct pcpu *pc;

	pc = pcpu_find(cpu_id);
	if (pc == NULL || rate == NULL)
		return (EINVAL);

	if (pc->pc_clock == 0)
		return (EOPNOTSUPP);

	*rate = pc->pc_clock;
	return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

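/*
 * Spinlock sections nest: only the outermost spinlock_enter() disables
 * interrupts and saves the previous DAIF mask, and only the matching
 * outermost spinlock_exit() restores it.
 */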
431spinlock_enter(void)
432{
433	struct thread *td;
434	register_t daif;
435
436	td = curthread;
437	if (td->td_md.md_spinlock_count == 0) {
438		daif = intr_disable();
439		td->td_md.md_spinlock_count = 1;
440		td->td_md.md_saved_daif = daif;
441	} else
442		td->td_md.md_spinlock_count++;
443	critical_enter();
444}
445
446void
447spinlock_exit(void)
448{
449	struct thread *td;
450	register_t daif;
451
452	td = curthread;
453	critical_exit();
454	daif = td->td_md.md_saved_daif;
455	td->td_md.md_spinlock_count--;
456	if (td->td_md.md_spinlock_count == 0)
457		intr_restore(daif);
458}
459
460#ifndef	_SYS_SYSPROTO_H_
struct sigreturn_args {
	ucontext_t *sigcntxp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	uint32_t spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
		return (EINVAL);

	set_mcontext(td, &uc.uc_mcontext);
	set_fpcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	int i;

	for (i = 0; i < PCB_LR; i++)
		pcb->pcb_x[i] = tf->tf_x[i];

	pcb->pcb_x[PCB_LR] = tf->tf_lr;
	pcb->pcb_pc = tf->tf_elr;
	pcb->pcb_sp = tf->tf_sp;
}

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	struct sysentvec *sysent;
	int code, onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_x[0] = sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;

	tf->tf_elr = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
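	/*
	 * Return from the handler via the signal trampoline: either the
	 * copy in the shared page (sv_sigcode_base) or, failing that, the
	 * sigcode copied out below the process's ps_strings, so that
	 * sigreturn(2) runs when the handler returns.
	 */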
	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_lr = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

typedef struct {
	uint32_t type;
	uint64_t phys_start;
	uint64_t virt_start;
	uint64_t num_pages;
	uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

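/*
 * Insert the region [base, base + length) into the sorted physmap
 * array, merging it with a neighbour when the addresses are adjacent.
 * Returns 1 if the region was recorded (or harmlessly dropped) and 0
 * when the table is full, so callers can stop feeding in regions.
 */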
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	u_int i, insert_idx, _physmap_idx;

	_physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = _physmap_idx;
	for (i = 0; i <= _physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= _physmap_idx &&
	    base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	_physmap_idx += 2;
	*physmap_idxp = _physmap_idx;
	if (_physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = _physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}

#ifdef FDT
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{

	for (int i = 0; i < mrcnt; i++) {
		if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
		    physmap_idxp))
			break;
	}
}
#endif

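/*
 * Step to the next EFI memory descriptor using the size reported by the
 * firmware rather than sizeof(struct efi_md): UEFI may use descriptors
 * larger than the structure layout we know about.
 */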
#define efi_next_descriptor(ptr, size) \
	((struct efi_md *)(((uint8_t *) ptr) + size))

static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idxp))
			break;
	}
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif

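/*
 * Derive the cache geometry from CTR_EL0 and DCZID_EL0.  Both encode
 * sizes as log2 of the number of 4-byte words, so a field value of 4,
 * for example, means sizeof(int) << 4 = 64 bytes.
 */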
static void
cache_setup(void)
{
	int dcache_line_shift, icache_line_shift, dczva_line_shift;
	uint32_t ctr_el0;
	uint32_t dczid_el0;

	ctr_el0 = READ_SPECIALREG(ctr_el0);

	/* Read the log2 words in each D cache line */
	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
	/* Get the D cache line size */
	dcache_line_size = sizeof(int) << dcache_line_shift;

	/* And the same for the I cache */
	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
	icache_line_size = sizeof(int) << icache_line_shift;

	idcache_line_size = MIN(dcache_line_size, icache_line_size);

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check whether dc zva is prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* Same log2-of-words encoding as the cache lines above */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}

void
initarm(struct arm64_bootparams *abp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	vm_paddr_t mem_len;
	int i;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/* Load the physical memory ranges */
	physmap_idx = 0;
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
	else {
		/* Grab physical memory region information from the device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
		    &physmap_idx);
	}
#endif

	/* Populate dump_avail and total up the physical memory */
	mem_len = 0;
	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
		mem_len += physmap[i + 1] - physmap[i];
	}
	dump_avail[i] = 0;
	dump_avail[i + 1] = 0;

	/* Set the pcpu data; this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

	devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_monitor_init();
	kdb_init();

	early_boot = 0;
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}

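/*
 * Translate a virtual address by hand.  The helpers used below are
 * expected to wrap the "at s1e1r"/"at s1e1w" instructions (stage 1,
 * EL1 translation, for read and for write) and return PAR_EL1, whose
 * low bit is set when the translation aborted.
 */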
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	uint64_t phys;

	if (have_addr) {
		phys = arm64_address_translate_s1e1r(addr);
		db_printf("Physical address reg (read):  0x%016lx\n", phys);
		phys = arm64_address_translate_s1e1w(addr);
		db_printf("Physical address reg (write): 0x%016lx\n", phys);
	} else
		db_printf("show vtop <virt_addr>\n");
}
#endif