1/*-
2 * Copyright (c) 2014 Andrew Turner
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include "opt_platform.h"
29#include "opt_ddb.h"
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/machdep.c 341166 2018-11-28 21:20:51Z vangyzen $");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/buf.h>
37#include <sys/bus.h>
38#include <sys/cons.h>
39#include <sys/cpu.h>
40#include <sys/devmap.h>
41#include <sys/efi.h>
42#include <sys/exec.h>
43#include <sys/imgact.h>
44#include <sys/kdb.h>
45#include <sys/kernel.h>
46#include <sys/limits.h>
47#include <sys/linker.h>
48#include <sys/msgbuf.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
51#include <sys/ptrace.h>
52#include <sys/reboot.h>
53#include <sys/rwlock.h>
54#include <sys/sched.h>
55#include <sys/signalvar.h>
56#include <sys/syscallsubr.h>
57#include <sys/sysent.h>
58#include <sys/sysproto.h>
59#include <sys/ucontext.h>
60#include <sys/vdso.h>
61
62#include <vm/vm.h>
63#include <vm/vm_kern.h>
64#include <vm/vm_object.h>
65#include <vm/vm_page.h>
66#include <vm/pmap.h>
67#include <vm/vm_map.h>
68#include <vm/vm_pager.h>
69
70#include <machine/armreg.h>
71#include <machine/cpu.h>
72#include <machine/debug_monitor.h>
73#include <machine/kdb.h>
74#include <machine/machdep.h>
75#include <machine/metadata.h>
76#include <machine/md_var.h>
77#include <machine/pcb.h>
78#include <machine/reg.h>
79#include <machine/vmparam.h>
80
81#ifdef VFP
82#include <machine/vfp.h>
83#endif
84
85#ifdef FDT
86#include <dev/fdt/fdt_common.h>
87#include <dev/ofw/openfirm.h>
88#endif
89
/* Per-CPU data for every possible CPU; CPU 0 is used during early boot. */
struct pcpu __pcpu[MAXCPU];

/* Static trapframe for thread0, installed by init_proc0(). */
static struct trapframe proc0_tf;

/* Usable/dumpable physical memory ranges, as [start, end) pairs. */
vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

/* Cleared at the end of initarm() once early bootstrap is complete. */
int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

/* Two slots (start, end) per physical segment. */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */
int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */
int has_pan;

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero; cache_setup() may switch it. */
void (*pagezero)(void *p) = pagezero_simple;
120
121static void
122pan_setup(void)
123{
124	uint64_t id_aa64mfr1;
125
126	id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
127	if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
128		has_pan = 1;
129}
130
/*
 * Enable PAN if the CPU supports it: clear SCTLR_EL1.SPAN so PAN is
 * set on exception entry, then set the PAN PSTATE bit immediately.
 */
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
151
/*
 * Late CPU startup: identify the boot CPU and initialize the kernel
 * submaps and buffer cache.  Run via SYSINIT at SI_SUB_CPU.
 */
static void
cpu_startup(void *dummy)
{

	identify_cpu();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}
162
163SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
164
/*
 * Wake an idle CPU.  Nothing is needed here: the WFI used in
 * cpu_idle() wakes on any interrupt, so report "not handled".
 */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
171
172int
173fill_regs(struct thread *td, struct reg *regs)
174{
175	struct trapframe *frame;
176
177	frame = td->td_frame;
178	regs->sp = frame->tf_sp;
179	regs->lr = frame->tf_lr;
180	regs->elr = frame->tf_elr;
181	regs->spsr = frame->tf_spsr;
182
183	memcpy(regs->x, frame->tf_x, sizeof(regs->x));
184
185	return (0);
186}
187
188int
189set_regs(struct thread *td, struct reg *regs)
190{
191	struct trapframe *frame;
192
193	frame = td->td_frame;
194	frame->tf_sp = regs->sp;
195	frame->tf_lr = regs->lr;
196	frame->tf_elr = regs->elr;
197	frame->tf_spsr &= ~PSR_FLAGS;
198	frame->tf_spsr |= regs->spsr & PSR_FLAGS;
199
200	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
201
202	return (0);
203}
204
/*
 * Copy the thread's VFP state into a struct fpreg.  If the thread has
 * not used the FPU (or the kernel is built without VFP) the registers
 * are reported as all zero.  Always succeeds.
 */
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);

		memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpcr;
		regs->fp_sr = pcb->pcb_fpsr;
	} else
#endif
		/* No FP state (or !VFP kernel): report zeros. */
		memset(regs, 0, sizeof(*regs));
	return (0);
}
228
/*
 * Install a struct fpreg into the thread's saved VFP state in the pcb.
 * A no-op on kernels built without VFP.  Always succeeds.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpcr = regs->fp_cr;
	pcb->pcb_fpsr = regs->fp_sr;
#endif
	return (0);
}
242
/* Unimplemented: hardware debug register read for ptrace. */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	printf("ARM64TODO: fill_dbregs");
	return (EDOOFUS);
}
250
/* Unimplemented: hardware debug register write for ptrace. */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	printf("ARM64TODO: set_dbregs");
	return (EDOOFUS);
}
258
/* Unimplemented: set the traced thread's program counter. */
int
ptrace_set_pc(struct thread *td, u_long addr)
{

	printf("ARM64TODO: ptrace_set_pc");
	return (EDOOFUS);
}
266
267int
268ptrace_single_step(struct thread *td)
269{
270
271	td->td_frame->tf_spsr |= PSR_SS;
272	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
273	return (0);
274}
275
276int
277ptrace_clear_single_step(struct thread *td)
278{
279
280	td->td_frame->tf_spsr &= ~PSR_SS;
281	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
282	return (0);
283}
284
/*
 * Reset the thread's register state for a new executable image:
 * zero the trapframe, point the stack at the (aligned) argument
 * block and start execution at the image entry point.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(struct trapframe));

	/*
	 * We need to set x0 for init as it doesn't call
	 * cpu_set_syscall_retval to copy the value. We also
	 * need to set td_retval for the cases where we do.
	 */
	tf->tf_x[0] = td->td_retval[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;
}
302
/* Sanity check these are the same size, they will be memcpy'd to and fro */
/* (trapframe x-registers vs. gpregs/reg exported register arrays) */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);
308
/*
 * Fill the general-purpose part of a machine context from the thread's
 * trapframe.  With GET_MC_CLEAR_RET, x0 and the carry flag (PSR_C) are
 * cleared in the saved copy — presumably so a resumed context observes
 * a successful-syscall return; confirm against the syscall return ABI.
 * Always succeeds.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	/* x1..x29 are copied unconditionally; x0 was handled above. */
	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;

	return (0);
}
331
/*
 * Install the general-purpose part of a machine context into the
 * thread's trapframe.  The spsr is validated first: the context must
 * target EL0 and must not mask any of the D/A/I/F exception bits,
 * otherwise EINVAL is returned and nothing is changed.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	uint32_t spsr;

	spsr = mcp->mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

	return (0);
}
352
353static void
354get_fpcontext(struct thread *td, mcontext_t *mcp)
355{
356#ifdef VFP
357	struct pcb *curpcb;
358
359	critical_enter();
360
361	curpcb = curthread->td_pcb;
362
363	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
364		/*
365		 * If we have just been running VFP instructions we will
366		 * need to save the state to memcpy it below.
367		 */
368		vfp_save_state(td, curpcb);
369
370		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
371		    sizeof(mcp->mc_fpregs));
372		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
373		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
374		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
375		mcp->mc_flags |= _MC_FP_VALID;
376	}
377
378	critical_exit();
379#endif
380}
381
382static void
383set_fpcontext(struct thread *td, mcontext_t *mcp)
384{
385#ifdef VFP
386	struct pcb *curpcb;
387
388	critical_enter();
389
390	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
391		curpcb = curthread->td_pcb;
392
393		/*
394		 * Discard any vfp state for the current thread, we
395		 * are about to override it.
396		 */
397		vfp_discard(td);
398
399		memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
400		    sizeof(mcp->mc_fpregs));
401		curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
402		curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
403		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
404	}
405
406	critical_exit();
407#endif
408}
409
/*
 * Idle the CPU: if nothing is runnable, wait for an interrupt (WFI)
 * after a full barrier.  When not "busy", the periodic clock is
 * stopped around the wait to save power.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
425
/*
 * Halt the CPU permanently: disable interrupts and sit in a
 * low-power wait-for-interrupt loop.  Never returns.
 */
void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
436
437/*
438 * Flush the D-cache for non-DMA I/O so that the I-cache can
439 * be made coherent later.
440 */
441void
442cpu_flush_dcache(void *ptr, size_t len)
443{
444
445	/* ARM64TODO TBD */
446}
447
448/* Get current clock frequency for the given CPU ID. */
449int
450cpu_est_clockrate(int cpu_id, uint64_t *rate)
451{
452	struct pcpu *pc;
453
454	pc = pcpu_find(cpu_id);
455	if (pc == NULL || rate == NULL)
456		return (EINVAL);
457
458	if (pc->pc_clock == 0)
459		return (EOPNOTSUPP);
460
461	*rate = pc->pc_clock;
462	return (0);
463}
464
/*
 * Machine-dependent pcpu initialization.  The ACPI id is set to an
 * invalid sentinel here; it is filled in later if ACPI enumerates
 * the CPU.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}
471
/*
 * Enter a spinlock section: on the outermost entry, disable
 * interrupts and remember the previous DAIF state so spinlock_exit()
 * can restore it.  Nesting is tracked with md_spinlock_count.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
487
/*
 * Leave a spinlock section: drop one nesting level and, on the
 * outermost exit, restore the DAIF state saved by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	critical_exit();
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(daif);
}
501
502#ifndef	_SYS_SYSPROTO_H_
503struct sigreturn_args {
504	ucontext_t *ucp;
505};
506#endif
507
/*
 * sigreturn(2): restore the register and FP context saved by
 * sendsig(), then restore the signal mask.  Returns EJUSTRETURN on
 * success so the syscall path does not clobber the restored registers.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	/*
	 * NOTE(review): uap comes from the syscall dispatcher and
	 * should never be NULL; this check looks vestigial.
	 */
	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/* Validate and install the GP context before touching FP state. */
	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);
	set_fpcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
529
530/*
531 * Construct a PCB from a trapframe. This is called from kdb_trap() where
532 * we want to start a backtrace from the function that caused us to enter
533 * the debugger. We have the context in the trapframe, but base the trace
534 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
535 * enough for a backtrace.
536 */
537void
538makectx(struct trapframe *tf, struct pcb *pcb)
539{
540	int i;
541
542	for (i = 0; i < PCB_LR; i++)
543		pcb->pcb_x[i] = tf->tf_x[i];
544
545	pcb->pcb_x[PCB_LR] = tf->tf_lr;
546	pcb->pcb_pc = tf->tf_elr;
547	pcb->pcb_sp = tf->tf_sp;
548}
549
550void
551sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
552{
553	struct thread *td;
554	struct proc *p;
555	struct trapframe *tf;
556	struct sigframe *fp, frame;
557	struct sigacts *psp;
558	struct sysentvec *sysent;
559	int code, onstack, sig;
560
561	td = curthread;
562	p = td->td_proc;
563	PROC_LOCK_ASSERT(p, MA_OWNED);
564
565	sig = ksi->ksi_signo;
566	code = ksi->ksi_code;
567	psp = p->p_sigacts;
568	mtx_assert(&psp->ps_mtx, MA_OWNED);
569
570	tf = td->td_frame;
571	onstack = sigonstack(tf->tf_sp);
572
573	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
574	    catcher, sig);
575
576	/* Allocate and validate space for the signal handler context. */
577	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
578	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
579		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
580		    td->td_sigstk.ss_size);
581#if defined(COMPAT_43)
582		td->td_sigstk.ss_flags |= SS_ONSTACK;
583#endif
584	} else {
585		fp = (struct sigframe *)td->td_frame->tf_sp;
586	}
587
588	/* Make room, keeping the stack aligned */
589	fp--;
590	fp = (struct sigframe *)STACKALIGN(fp);
591
592	/* Fill in the frame to copy out */
593	bzero(&frame, sizeof(frame));
594	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
595	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
596	frame.sf_si = ksi->ksi_info;
597	frame.sf_uc.uc_sigmask = *mask;
598	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
599	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
600	frame.sf_uc.uc_stack = td->td_sigstk;
601	mtx_unlock(&psp->ps_mtx);
602	PROC_UNLOCK(td->td_proc);
603
604	/* Copy the sigframe out to the user's stack. */
605	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
606		/* Process has trashed its stack. Kill it. */
607		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
608		PROC_LOCK(p);
609		sigexit(td, SIGILL);
610	}
611
612	tf->tf_x[0]= sig;
613	tf->tf_x[1] = (register_t)&fp->sf_si;
614	tf->tf_x[2] = (register_t)&fp->sf_uc;
615
616	tf->tf_elr = (register_t)catcher;
617	tf->tf_sp = (register_t)fp;
618	sysent = p->p_sysent;
619	if (sysent->sv_sigcode_base != 0)
620		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
621	else
622		tf->tf_lr = (register_t)(sysent->sv_psstrings -
623		    *(sysent->sv_szsigcode));
624
625	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
626	    tf->tf_sp);
627
628	PROC_LOCK(p);
629	mtx_lock(&psp->ps_mtx);
630}
631
/*
 * Hook up proc0/thread0: the pcb is carved off the top of the kernel
 * stack, the trapframe is the static proc0_tf, and the boot CPU's
 * curpcb is pointed at thread0's pcb.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	/* One struct pcb below the top of the stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	/* UINT_MAX: FP state not resident on any CPU. */
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
645
/*
 * Local mirror of the UEFI memory descriptor layout.
 * NOTE(review): nothing in this file references this typedef — the EFI
 * map is walked via struct efi_md below; verify whether it can be
 * removed.
 */
typedef struct {
	uint32_t type;
	uint64_t phys_start;
	uint64_t virt_start;
	uint64_t num_pages;
	uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;
653
/*
 * Insert the range [base, base + length) into the physmap array, which
 * holds (start, end) pairs sorted by address.  Adjacent ranges are
 * coalesced; overlapping ranges are ignored.  Returns 1 on success (or
 * harmless rejection), 0 when the map is full and the caller should
 * stop adding entries.
 */
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	u_int i, insert_idx, _physmap_idx;

	_physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = _physmap_idx;
	for (i = 0; i <= _physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= _physmap_idx &&
	    base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	_physmap_idx += 2;
	*physmap_idxp = _physmap_idx;
	if (_physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = _physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}
718
#ifdef FDT
/*
 * Feed each FDT-described memory range into the physmap, stopping
 * early if the physmap fills up.
 */
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	int i;

	for (i = 0; i < mrcnt; i++) {
		if (add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
		    physmap_idxp) == 0)
			break;
	}
}
#endif
732
/*
 * Walk the UEFI memory map handed over by the loader, optionally
 * printing it (RB_VERBOSE), and add every usable entry (loader/boot
 * services code+data and conventional memory) to the physmap.
 */
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	/* Human-readable names indexed by EFI memory type. */
	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode",
		"PersistentMemory"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	/* Descriptors may be larger than struct efi_md; step by size. */
	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type < nitems(types))
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_NV)
				printf("NV ");
			if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
				printf("MORE_RELIABLE ");
			if (p->md_attr & EFI_MD_ATTR_RO)
				printf("RO ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idxp))
			break;
	}
}
830
#ifdef FDT
/*
 * Locate the device tree blob passed in by the loader (module
 * metadata) and initialize the OFW/FDT layer from it.  Panics if a
 * DTB is present but cannot be installed.
 */
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif
850
851static void
852cache_setup(void)
853{
854	int dcache_line_shift, icache_line_shift, dczva_line_shift;
855	uint32_t ctr_el0;
856	uint32_t dczid_el0;
857
858	ctr_el0 = READ_SPECIALREG(ctr_el0);
859
860	/* Read the log2 words in each D cache line */
861	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
862	/* Get the D cache line size */
863	dcache_line_size = sizeof(int) << dcache_line_shift;
864
865	/* And the same for the I cache */
866	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
867	icache_line_size = sizeof(int) << icache_line_shift;
868
869	idcache_line_size = MIN(dcache_line_size, icache_line_size);
870
871	dczid_el0 = READ_SPECIALREG(dczid_el0);
872
873	/* Check if dc zva is not prohibited */
874	if (dczid_el0 & DCZID_DZP)
875		dczva_line_size = 0;
876	else {
877		/* Same as with above calculations */
878		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
879		dczva_line_size = sizeof(int) << dczva_line_shift;
880
881		/* Change pagezero function */
882		pagezero = pagezero_cache;
883	}
884}
885
/*
 * Machine-dependent early boot entry, called from locore with the
 * boot parameters.  Order here matters: module metadata and the
 * memory map must be parsed before pmap_bootstrap, the pcpu pointer
 * must be live before anything uses curthread, and the console is
 * only usable after devmap/cninit.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	vm_paddr_t mem_len;
	int i;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/* Load the physical memory ranges */
	physmap_idx = 0;
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
		    &physmap_idx);
	}
#endif

	/* Print the memory map */
	mem_len = 0;
	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
		mem_len += physmap[i + 1] - physmap[i];
	}
	/* Terminate dump_avail with a zero pair. */
	dump_avail[i] = 0;
	dump_avail[i + 1] = 0;

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap  to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

	devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
	pan_enable();

	early_boot = 0;
}
984
/*
 * Initialize the debug architecture: release the OS lock so debug
 * registers can be programmed, then set up the kernel debug monitor.
 */
void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(OSLAR_EL1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}
997
998#ifdef DDB
999#include <ddb/ddb.h>
1000
/* DDB "show specialregs": dump the EL0/EL1 system registers. */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
/* Read and print one system register by name. */
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1075
/*
 * DDB "show vtop <virt_addr>": run the stage-1 EL1 read and write
 * address translations for the given virtual address and print the
 * resulting PAR values.
 */
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	uint64_t phys;

	if (have_addr) {
		phys = arm64_address_translate_s1e1r(addr);
		db_printf("Physical address reg (read):  0x%016lx\n", phys);
		phys = arm64_address_translate_s1e1w(addr);
		db_printf("Physical address reg (write): 0x%016lx\n", phys);
	} else
		db_printf("show vtop <virt_addr>\n");
}
1088#endif
1089