/* $OpenBSD: machdep.c,v 1.74 2022/10/04 19:41:21 kettenis Exp $ */
/*
 * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
 * Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/reboot.h>
#include <sys/mount.h>
#include <sys/exec.h>
#include <sys/user.h>
#include <sys/conf.h>
#include <sys/kcore.h>
#include <sys/core.h>
#include <sys/msgbuf.h>
#include <sys/buf.h>
#include <sys/termios.h>
#include <sys/sensors.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <uvm/uvm.h>
#include <dev/cons.h>
#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>
#include <machine/param.h>
#include <machine/kcore.h>
#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/fpu.h>
#include <arm64/arm64/arm64var.h>

#include <machine/db_machdep.h>
#include <ddb/db_extern.h>

#include <dev/efi/efi.h>

#include "softraid.h"
#if NSOFTRAID > 0
#include <dev/softraidvar.h>
#endif

extern vaddr_t virtual_avail;
extern uint64_t esym;

extern char _start[];

char *boot_args = NULL;
uint8_t *bootmac = NULL;

int stdout_node;
int stdout_speed;

void (*cpuresetfn)(void);
void (*powerdownfn)(void);

int cold = 1;

struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;

int physmem;

struct consdev *cn_tab;

caddr_t msgbufaddr;
paddr_t msgbufphys;

struct user *proc0paddr;

struct uvm_constraint_range  dma_constraint = { 0x0, (paddr_t)-1 };
struct uvm_constraint_range *uvm_md_constraints[] = {
	&dma_constraint,
	NULL,
};

/* the following is used externally (sysctl_hw) */
char    machine[] = MACHINE;            /* from <machine/param.h> */

int safepri = 0;

struct cpu_info cpu_info_primary;
struct cpu_info *cpu_info[MAXCPUS] = { &cpu_info_primary };

struct fdt_reg memreg[VM_PHYSSEG_MAX];
int nmemreg;

void memreg_add(const struct fdt_reg *);
void memreg_remove(const struct fdt_reg *);

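/*
 * Small local atoi(); only used to parse the optional baud rate
 * suffix of the "stdout-path" property (e.g. "serial0:115200n8").
 */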
static int
atoi(const char *s)
{
	int n, neg;

	n = 0;
	neg = 0;

	while (*s == '-') {
		s++;
		neg = !neg;
	}

	while (*s != '\0') {
		if (*s < '0' || *s > '9')
			break;

		n = (10 * n) + (*s - '0');
		s++;
	}

	return (neg ? -n : n);
}

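/*
 * Locate the console device node in the FDT: prefer the /chosen
 * "stdout-path" property and fall back to the "serial0" alias.  The
 * node is only returned if it is compatible with the driver name
 * passed in by the caller.
 */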
void *
fdt_find_cons(const char *name)
{
	char *alias = "serial0";
	char buf[128];
	char *stdout = NULL;
	char *p;
	void *node;

	/* First check if "stdout-path" is set. */
	node = fdt_find_node("/chosen");
	if (node) {
		if (fdt_node_property(node, "stdout-path", &stdout) > 0) {
			if (strchr(stdout, ':') != NULL) {
				strlcpy(buf, stdout, sizeof(buf));
				if ((p = strchr(buf, ':')) != NULL) {
					*p++ = '\0';
					stdout_speed = atoi(p);
				}
				stdout = buf;
			}
			if (stdout[0] != '/') {
				/* It's an alias. */
				alias = stdout;
				stdout = NULL;
			}
		}
	}

	/* Perform alias lookup if necessary. */
	if (stdout == NULL) {
		node = fdt_find_node("/aliases");
		if (node)
			fdt_node_property(node, alias, &stdout);
	}

	/* Look up the physical address of the interface. */
	if (stdout) {
		node = fdt_find_node(stdout);
		if (node && fdt_is_compatible(node, name)) {
			stdout_node = OF_finddevice(stdout);
			return (node);
		}
	}

	return (NULL);
}

void	amluart_init_cons(void);
void	cduart_init_cons(void);
void	com_fdt_init_cons(void);
void	exuart_init_cons(void);
void	imxuart_init_cons(void);
void	mvuart_init_cons(void);
void	pluart_init_cons(void);
void	simplefb_init_cons(bus_space_tag_t);

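/*
 * Attach the console.  Each driver's *_init_cons() checks the FDT for
 * console hardware it supports and registers itself if it finds any;
 * the simplefb framebuffer console is tried last.
 */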
void
consinit(void)
{
	static int consinit_called = 0;

	if (consinit_called != 0)
		return;

	consinit_called = 1;

	amluart_init_cons();
	cduart_init_cons();
	com_fdt_init_cons();
	exuart_init_cons();
	imxuart_init_cons();
	mvuart_init_cons();
	pluart_init_cons();
	simplefb_init_cons(&arm64_bs_tag);
}

void
cpu_idle_enter(void)
{
}

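/*
 * Idle by waiting for an interrupt.  Interrupts are unmasked first so
 * that a pending interrupt is taken as soon as the core wakes up; the
 * DSB ensures outstanding memory accesses have completed before the
 * core enters the low-power state.
 */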
void
cpu_idle_cycle(void)
{
	enable_irq_daif();
	__asm volatile("dsb sy" ::: "memory");
	__asm volatile("wfi");
}

void
cpu_idle_leave(void)
{
}

/* Dummy trapframe for proc0. */
struct trapframe proc0tf;

void
cpu_startup(void)
{
	u_int loop;
	paddr_t minaddr;
	paddr_t maxaddr;

	proc0.p_addr = proc0paddr;

	/*
	 * Give pmap a chance to set up a few more things now that the
	 * VM system is initialised.
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was set up during the secondary bootstrap. */
	for (loop = 0; loop < atop(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
		    msgbufphys + loop * PAGE_SIZE, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s", version);

	printf("real mem  = %lu (%luMB)\n", ptoa(physmem),
	    ptoa(physmem) / 1024 / 1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free) / 1024 / 1024);

	curpcb = &proc0.p_addr->u_pcb;
	curpcb->pcb_flags = 0;
	curpcb->pcb_tf = &proc0tf;

	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}

void    cpu_switchto_asm(struct proc *, struct proc *);

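/*
 * Context switch.  If the old process used the FPU, save its FPU state
 * into the PCB and release the unit before the low-level assembly
 * switch is performed.
 */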
void
cpu_switchto(struct proc *old, struct proc *new)
{
	if (old) {
		struct pcb *pcb = &old->p_addr->u_pcb;

		if (pcb->pcb_flags & PCB_FPU)
			fpu_save(old);

		fpu_drop();
	}

	cpu_switchto_asm(old, new);
}

extern uint64_t cpu_id_aa64isar0;
extern uint64_t cpu_id_aa64isar1;

/*
 * machine dependent system variables.
 */

int
cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	char *compatible;
	int node, len, error;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_COMPATIBLE:
		node = OF_finddevice("/");
		len = OF_getproplen(node, "compatible");
		if (len <= 0)
			return (EOPNOTSUPP);
		compatible = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
		OF_getprop(node, "compatible", compatible, len);
		compatible[len - 1] = 0;
		error = sysctl_rdstring(oldp, oldlenp, newp, compatible);
		free(compatible, M_TEMP, len);
		return error;
	case CPU_ID_AA64ISAR0:
		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64isar0);
	case CPU_ID_AA64ISAR1:
		return sysctl_rdquad(oldp, oldlenp, newp, cpu_id_aa64isar1);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

void dumpsys(void);

int	waittime = -1;

__dead void
boot(int howto)
{
	if ((howto & RB_RESET) != 0)
		goto doreset;

	if (cold) {
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown(curproc);

		if ((howto & RB_TIMEBAD) == 0) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}
	if_downall();

	uvm_shutdown();
	splhigh();
	cold = 1;

	if ((howto & RB_DUMP) != 0)
		dumpsys();

haltsys:
	config_suspend_all(DVACT_POWERDOWN);

	if ((howto & RB_HALT) != 0) {
		if ((howto & RB_POWERDOWN) != 0) {
			printf("\nAttempting to power down...\n");
			delay(500000);
			if (powerdownfn)
				(*powerdownfn)();
		}

		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

doreset:
	printf("rebooting...\n");
	delay(500000);
	if (cpuresetfn)
		(*cpuresetfn)();
	printf("reboot failed; spinning\n");
	for (;;)
		continue;
	/* NOTREACHED */
}

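/*
 * Set up the register state for a newly exec'd process: discard any
 * FPU state, clear the trapframe and point it at the new entry point
 * with the requested stack.  PSR_M_EL0t returns the process to EL0
 * and PSR_DIT enables data-independent timing for userland.
 */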
void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    register_t *retval)
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct trapframe *tf = pcb->pcb_tf;

	/* If we were using the FPU, forget about it. */
	memset(&pcb->pcb_fpstate, 0, sizeof(pcb->pcb_fpstate));
	pcb->pcb_flags &= ~PCB_FPU;
	fpu_drop();

	memset(tf, 0, sizeof(*tf));
	tf->tf_sp = stack;
	tf->tf_lr = pack->ep_entry;
	tf->tf_elr = pack->ep_entry; /* ??? */
	tf->tf_spsr = PSR_M_EL0t | PSR_DIT;

	retval[1] = 0;
}

void
need_resched(struct cpu_info *ci)
{
	ci->ci_want_resched = 1;

	/* There's a risk we'll be called before the idle threads start */
	if (ci->ci_curproc) {
		aston(ci->ci_curproc);
		cpu_kick(ci);
	}
}

int	cpu_dumpsize(void);
u_long	cpu_dump_mempagecnt(void);

paddr_t dumpmem_paddr;
vaddr_t dumpmem_vaddr;
psize_t dumpmem_sz;

/*
 * These variables are needed by /sbin/savecore
 */
u_long	dumpmag = 0x8fca0101;	/* magic number */
int 	dumpsize = 0;		/* pages */
long	dumplo = 0; 		/* blocks */

/*
 * cpu_dump: dump the machine-dependent kernel core dump headers.
 */
int
cpu_dump(void)
{
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	char buf[dbtob(1)];
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
#if 0
	caddr_t va;
	int i;
#endif

	dump = bdevsw[major(dumpdev)].d_dump;

	memset(buf, 0, sizeof buf);
	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
	memsegp = (phys_ram_seg_t *)&buf[ALIGN(sizeof(*segp)) +
	    ALIGN(sizeof(*cpuhdrp))];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdrp->kernelbase = KERNEL_BASE;
	cpuhdrp->kerneloffs = 0;
	cpuhdrp->staticsize = 0;
	cpuhdrp->pmap_kernel_l1 = 0;
	cpuhdrp->pmap_kernel_l2 = 0;

#if 0
	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		memsegp[i].start = mem_clusters[i].start;
		memsegp[i].size = mem_clusters[i].size & PMAP_PA_MASK;
	}

	/*
	 * If we have dump memory then assume the kernel stack is in high
	 * memory and bounce
	 */
	if (dumpmem_vaddr != 0) {
		memcpy((char *)dumpmem_vaddr, buf, sizeof(buf));
		va = (caddr_t)dumpmem_vaddr;
	} else {
		va = (caddr_t)buf;
	}
	return (dump(dumpdev, dumplo, va, dbtob(1)));
#else
	return ENOSYS;
#endif
}

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
dumpconf(void)
{
	int nblks, dumpblks;	/* size of dump area */

	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		return;
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		return;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();
}

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
#define BYTES_PER_DUMP  MAXPHYS /* must be a multiple of pagesize */

void
dumpsys(void)
{
	u_long totalbytesleft, bytes, i, n, memseg;
	u_long maddr;
	daddr_t blkno;
	void *va;
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	int error;

#if 0
	/* Save registers. */
	savectx(&dumppcb);
#endif

	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration: if the dump device has not
	 * been configured yet, do that now.
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif

	error = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (error == -1) {
		printf("area unavailable\n");
		return;
	}

	if ((error = cpu_dump()) != 0)
		goto err;

	totalbytesleft = ptoa(cpu_dump_mempagecnt());
	blkno = dumplo + cpu_dumpsize();
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;

	bytes = n = i = memseg = 0;
	maddr = 0;
	va = 0;
#if 0
	for (memseg = 0; memseg < mem_cluster_cnt; memseg++) {
		maddr = mem_clusters[memseg].start;
		bytes = mem_clusters[memseg].size;

		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
			/* Print out how many MBs we have left to go. */
			if ((totalbytesleft % (1024*1024)) < BYTES_PER_DUMP)
				printf("%ld ", totalbytesleft / (1024 * 1024));

			/* Limit size for next transfer. */
			n = bytes - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;
			if (maddr > 0xffffffff) {
				va = (void *)dumpmem_vaddr;
				if (n > dumpmem_sz)
					n = dumpmem_sz;
				memcpy(va, (void *)PMAP_DIRECT_MAP(maddr), n);
			} else {
				va = (void *)PMAP_DIRECT_MAP(maddr);
			}

			error = (*dump)(dumpdev, blkno, va, n);
			if (error)
				goto err;
			maddr += n;
			blkno += btodb(n);		/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
			/* operator aborting dump? */
			if (sget() != NULL) {
				error = EINTR;
				break;
			}
#endif
		}
	}
#endif

 err:
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
}


/*
 * Size of memory segments, before any memory is stolen.
 */
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
int     mem_cluster_cnt;

/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 */
int
cpu_dumpsize(void)
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) +
	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return (-1);

	return (1);
}

u_long
cpu_dump_mempagecnt(void)
{
	return 0;
}

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */
int64_t dczva_line_size;	/* The size of cache line the dc zva zeroes */

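/*
 * Determine the cache line sizes from CTR_EL0 and DCZID_EL0.  Both
 * registers report sizes as log2 of the number of 4-byte words, hence
 * the "sizeof(int) << shift" conversions below.
 */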
void
cache_setup(void)
{
	int dcache_line_shift, icache_line_shift, dczva_line_shift;
	uint32_t ctr_el0;
	uint32_t dczid_el0;

	ctr_el0 = READ_SPECIALREG(ctr_el0);

	/* Read the log2 words in each D cache line */
	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
	/* Get the D cache line size */
	dcache_line_size = sizeof(int) << dcache_line_shift;

	/* And the same for the I cache */
	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
	icache_line_size = sizeof(int) << icache_line_shift;

	idcache_line_size = MIN(dcache_line_size, icache_line_size);

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* Same as with above calculations */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;
	}
}

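/*
 * UEFI memory map handed over by the bootloader through properties on
 * the /chosen node; see the "openbsd,uefi-mmap-*" handling in initarm().
 */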
uint64_t mmap_start;
uint32_t mmap_size;
uint32_t mmap_desc_size;
uint32_t mmap_desc_ver;

EFI_MEMORY_DESCRIPTOR *mmap;

void	collect_kernel_args(const char *);
void	process_kernel_args(void);

int	pmap_bootstrap_bs_map(bus_space_tag_t, bus_addr_t,
	    bus_size_t, int, bus_space_handle_t *);

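/*
 * Early machine-dependent initialisation: parse the /chosen node of
 * the FDT, bootstrap the pmap, relocate the FDT and the UEFI memory
 * map into kernel virtual memory and hand all usable physical memory
 * to UVM.
 */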
void
initarm(struct arm64_bootparams *abp)
{
	long kernbase = (long)_start & ~PAGE_MASK;
	long kvo = abp->kern_delta;
	paddr_t memstart, memend;
	vaddr_t vstart;
	void *config = abp->arg2;
	void *fdt = NULL;
	struct fdt_reg reg;
	void *node;
	EFI_PHYSICAL_ADDRESS system_table = 0;
	int (*map_func_save)(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
	int i;

	/*
	 * Set the per-CPU pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm volatile("mov x18, %0\n"
	    "msr tpidr_el1, %0" :: "r"(&cpu_info_primary));

	pmap_map_early((paddr_t)config, PAGE_SIZE);
	if (!fdt_init(config) || fdt_get_size(config) == 0)
		panic("initarm: no FDT");
	pmap_map_early((paddr_t)config, round_page(fdt_get_size(config)));

	node = fdt_find_node("/chosen");
	if (node != NULL) {
		char *prop;
		int len;
		static uint8_t lladdr[6];

		len = fdt_node_property(node, "bootargs", &prop);
		if (len > 0)
			collect_kernel_args(prop);

		len = fdt_node_property(node, "openbsd,boothowto", &prop);
		if (len == sizeof(boothowto))
			boothowto = bemtoh32((uint32_t *)prop);

		len = fdt_node_property(node, "openbsd,bootduid", &prop);
		if (len == sizeof(bootduid))
			memcpy(bootduid, prop, sizeof(bootduid));

		len = fdt_node_property(node, "openbsd,bootmac", &prop);
		if (len == sizeof(lladdr)) {
			memcpy(lladdr, prop, sizeof(lladdr));
			bootmac = lladdr;
		}

		len = fdt_node_property(node, "openbsd,sr-bootuuid", &prop);
#if NSOFTRAID > 0
		if (len == sizeof(sr_bootuuid))
			memcpy(&sr_bootuuid, prop, sizeof(sr_bootuuid));
#endif
		if (len > 0)
			explicit_bzero(prop, len);

		len = fdt_node_property(node, "openbsd,sr-bootkey", &prop);
#if NSOFTRAID > 0
		if (len == sizeof(sr_bootkey))
			memcpy(&sr_bootkey, prop, sizeof(sr_bootkey));
#endif
		if (len > 0)
			explicit_bzero(prop, len);

		len = fdt_node_property(node, "openbsd,uefi-mmap-start", &prop);
		if (len == sizeof(mmap_start))
			mmap_start = bemtoh64((uint64_t *)prop);
		len = fdt_node_property(node, "openbsd,uefi-mmap-size", &prop);
		if (len == sizeof(mmap_size))
			mmap_size = bemtoh32((uint32_t *)prop);
		len = fdt_node_property(node, "openbsd,uefi-mmap-desc-size", &prop);
		if (len == sizeof(mmap_desc_size))
			mmap_desc_size = bemtoh32((uint32_t *)prop);
		len = fdt_node_property(node, "openbsd,uefi-mmap-desc-ver", &prop);
		if (len == sizeof(mmap_desc_ver))
			mmap_desc_ver = bemtoh32((uint32_t *)prop);

		len = fdt_node_property(node, "openbsd,uefi-system-table", &prop);
		if (len == sizeof(system_table))
			system_table = bemtoh64((uint64_t *)prop);

		len = fdt_node_property(node, "openbsd,dma-constraint", &prop);
		if (len == sizeof(dma_constraint)) {
			dma_constraint.ucr_low = bemtoh64((uint64_t *)prop);
			dma_constraint.ucr_high = bemtoh64((uint64_t *)prop + 1);
		}
	}

	cache_setup();

	process_kernel_args();

	/* The bootloader has loaded us into a 64MB block. */
	memstart = KERNBASE + kvo;
	memend = memstart + 64 * 1024 * 1024;

	/* Bootstrap enough of pmap to enter the kernel proper. */
	vstart = pmap_bootstrap(kvo, abp->kern_l1pt,
	    kernbase, esym, memstart, memend);

	proc0paddr = (struct user *)abp->kern_stack;

	msgbufaddr = (caddr_t)vstart;
	msgbufphys = pmap_steal_avail(round_page(MSGBUFSIZE), PAGE_SIZE, NULL);
	vstart += round_page(MSGBUFSIZE);

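	/*
	 * Reserve per-CPU virtual address windows used by pmap for
	 * zeroing and copying physical pages.
	 */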
	zero_page = vstart;
	vstart += MAXCPUS * PAGE_SIZE;
	copy_src_page = vstart;
	vstart += MAXCPUS * PAGE_SIZE;
	copy_dst_page = vstart;
	vstart += MAXCPUS * PAGE_SIZE;

	/* Relocate the FDT to safe memory. */
	if (fdt_get_size(config) != 0) {
		uint32_t csize, size = round_page(fdt_get_size(config));
		paddr_t pa;
		vaddr_t va;

		pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
		memcpy((void *)pa, config, size); /* copy to physical */
		for (va = vstart, csize = size; csize > 0;
		    csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
			pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB);

		fdt = (void *)vstart;
		vstart += size;
	}

	/* Relocate the EFI memory map too. */
	if (mmap_start != 0) {
		uint32_t csize, size = round_page(mmap_size);
		paddr_t pa, startpa, endpa;
		vaddr_t va;

		startpa = trunc_page(mmap_start);
		endpa = round_page(mmap_start + mmap_size);
		for (pa = startpa, va = vstart; pa < endpa;
		    pa += PAGE_SIZE, va += PAGE_SIZE)
			pmap_kenter_cache(va, pa, PROT_READ, PMAP_CACHE_WB);
		pa = pmap_steal_avail(size, PAGE_SIZE, NULL);
		memcpy((void *)pa, (caddr_t)vstart + (mmap_start - startpa),
		    mmap_size); /* copy to physical */
		pmap_kremove(vstart, endpa - startpa);

		for (va = vstart, csize = size; csize > 0;
		    csize -= PAGE_SIZE, va += PAGE_SIZE, pa += PAGE_SIZE)
			pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE, PMAP_CACHE_WB);

		mmap = (void *)vstart;
		vstart += size;
	}

	/* No more KVA stealing after this point. */
	virtual_avail = vstart;

	/* Now we can reinit the FDT, using the virtual address. */
	if (fdt)
		fdt_init(fdt);

	map_func_save = arm64_bs_tag._space_map;
	arm64_bs_tag._space_map = pmap_bootstrap_bs_map;

	consinit();

	arm64_bs_tag._space_map = map_func_save;

	pmap_avail_fixup();

	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	/* Make what's left of the initial 64MB block available to UVM. */
	pmap_physload_avail();

	/* Make all other physical memory available to UVM. */
	if (mmap && mmap_desc_ver == EFI_MEMORY_DESCRIPTOR_VERSION) {
		EFI_MEMORY_DESCRIPTOR *desc = mmap;

		/*
		 * Load all memory marked as EfiConventionalMemory,
		 * EfiBootServicesCode or EfiBootServicesData.
		 * The initial 64MB memory block should be marked as
		 * EfiLoaderData so it won't be added here.
		 */
		for (i = 0; i < mmap_size / mmap_desc_size; i++) {
#ifdef MMAP_DEBUG
			printf("type 0x%x pa 0x%llx va 0x%llx pages 0x%llx attr 0x%llx\n",
			    desc->Type, desc->PhysicalStart,
			    desc->VirtualStart, desc->NumberOfPages,
			    desc->Attribute);
#endif
			if (desc->Type == EfiConventionalMemory ||
			    desc->Type == EfiBootServicesCode ||
			    desc->Type == EfiBootServicesData) {
				reg.addr = desc->PhysicalStart;
				reg.size = ptoa(desc->NumberOfPages);
				memreg_add(&reg);
			}
			desc = NextMemoryDescriptor(desc, mmap_desc_size);
		}
	} else {
		node = fdt_find_node("/memory");
		if (node == NULL)
			panic("%s: no memory specified", __func__);

		for (i = 0; nmemreg < nitems(memreg); i++) {
			if (fdt_get_reg(node, i, &reg))
				break;
			if (reg.size == 0)
				continue;
			memreg_add(&reg);
		}
	}

	/* Remove the initial 64MB block. */
	reg.addr = memstart;
	reg.size = memend - memstart;
	memreg_remove(&reg);

	for (i = 0; i < nmemreg; i++) {
		paddr_t start = memreg[i].addr;
		paddr_t end = start + memreg[i].size;

		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), 0);
		physmem += atop(end - start);
	}

	/*
	 * Make sure that we have enough KVA to initialize UVM.  In
	 * particular, we need enough KVA to be able to allocate the
	 * vm_page structures.
	 */
	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 1024 * 1024 * 1024 +
	    physmem * sizeof(struct vm_page));

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init();

	if (boothowto & RB_KDB)
		db_enter();
#endif

	softintr_init();
	splraise(IPL_IPI);
}

char bootargs[256];

void
collect_kernel_args(const char *args)
{
	/* Make a local copy of the bootargs */
	strlcpy(bootargs, args, sizeof(bootargs));
}

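/*
 * Parse the boot command line saved by collect_kernel_args(): skip the
 * kernel image name, remember where the arguments start and translate
 * any option flags into boothowto bits.
 */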
void
process_kernel_args(void)
{
	char *cp = bootargs;

	if (*cp == 0)
		return;

	/* Skip the kernel image filename */
	while (*cp != ' ' && *cp != 0)
		cp++;

	if (*cp != 0)
		*cp++ = 0;

	while (*cp == ' ')
		cp++;

	boot_args = cp;

	printf("bootargs: %s\n", boot_args);

	/* Set up a pointer to the boot flags. */
	while (*cp != '-')
		if (*cp++ == '\0')
			return;

	while (*cp != 0) {
		switch (*cp) {
		case 'a':
			boothowto |= RB_ASKNAME;
			break;
		case 'c':
			boothowto |= RB_CONFIG;
			break;
		case 'd':
			boothowto |= RB_KDB;
			break;
		case 's':
			boothowto |= RB_SINGLE;
			break;
		default:
			printf("unknown option `%c'\n", *cp);
			break;
		}
		cp++;
	}
}

/*
 * Allow bootstrap to steal KVA after machdep has given it back to pmap.
 */
int
pmap_bootstrap_bs_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	u_long startpa, pa, endpa;
	vaddr_t va;

	va = virtual_avail;	/* steal memory from virtual avail. */

	startpa = trunc_page(bpa);
	endpa = round_page((bpa + size));

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE,
		    PMAP_CACHE_DEV_NGNRNE);

	virtual_avail = va;

	return 0;
}

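/*
 * Add a physical memory region, merging it with an existing entry if
 * it is directly adjacent.  If the table is full the region is
 * silently dropped.
 */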
void
memreg_add(const struct fdt_reg *reg)
{
	int i;

	for (i = 0; i < nmemreg; i++) {
		if (reg->addr == memreg[i].addr + memreg[i].size) {
			memreg[i].size += reg->size;
			return;
		}
		if (reg->addr + reg->size == memreg[i].addr) {
			memreg[i].addr = reg->addr;
			memreg[i].size += reg->size;
			return;
		}
	}

	if (nmemreg >= nitems(memreg))
		return;

	memreg[nmemreg++] = *reg;
}

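/*
 * Remove a physical address range from the region list, trimming or
 * splitting the entries it overlaps and deleting any that become
 * empty.
 */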
void
memreg_remove(const struct fdt_reg *reg)
{
	uint64_t start = reg->addr;
	uint64_t end = reg->addr + reg->size;
	int i, j;

	for (i = 0; i < nmemreg; i++) {
		uint64_t memstart = memreg[i].addr;
		uint64_t memend = memreg[i].addr + memreg[i].size;

		if (end <= memstart)
			continue;
		if (start >= memend)
			continue;

		if (start <= memstart)
			memstart = MIN(end, memend);
		if (end >= memend)
			memend = MAX(start, memstart);

		if (start > memstart && end < memend) {
			if (nmemreg < nitems(memreg)) {
				memreg[nmemreg].addr = end;
				memreg[nmemreg].size = memend - end;
				nmemreg++;
			}
			memend = start;
		}
		memreg[i].addr = memstart;
		memreg[i].size = memend - memstart;
	}

	/* Remove empty slots. */
	for (i = nmemreg - 1; i >= 0; i--) {
		if (memreg[i].size == 0) {
			for (j = i; (j + 1) < nmemreg; j++)
				memreg[j] = memreg[j + 1];
			nmemreg--;
		}
	}
}