/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/boot.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/physmem.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/tslog.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/pte.h>
#include <machine/reg.h>
#include <machine/riscvreg.h>
#include <machine/sbi.h>
#include <machine/trap.h>
#include <machine/vmparam.h>

#ifdef FPE
#include <machine/fpe.h>
#endif

#ifdef FDT
#include <contrib/libfdt/libfdt.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

int early_boot = 1;
int cold = 1;

#define	DTB_SIZE_MAX	(1024 * 1024)

vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */

#define BOOT_HART_INVALID	0xffffffff
uint32_t boot_hart = BOOT_HART_INVALID;	/* The hart we booted on. */

cpuset_t all_harts;

extern int *end;

static char static_kenv[PAGE_SIZE];

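/*
 * Machine-dependent startup, run from a SYSINIT: report the SBI and CPU
 * details, describe physical memory, and set up the buffer cache and pager.
 */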
static void
cpu_startup(void *dummy)
{

	sbi_print_version();
	identify_cpu();

	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / (1024 * 1024));

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / (1024 * 1024));
	if (bootverbose)
		devmap_print_table();

	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

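/*
 * There is no special mechanism to wake an idle hart; report that we did
 * not wake it so the caller falls back to sending an IPI.
 */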
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

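/* Copy the general-purpose register state out of a thread's trap frame. */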
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sepc = frame->tf_sepc;
	regs->sstatus = frame->tf_sstatus;
	regs->ra = frame->tf_ra;
	regs->sp = frame->tf_sp;
	regs->gp = frame->tf_gp;
	regs->tp = frame->tf_tp;

	memcpy(regs->t, frame->tf_t, sizeof(regs->t));
	memcpy(regs->s, frame->tf_s, sizeof(regs->s));
	memcpy(regs->a, frame->tf_a, sizeof(regs->a));

	return (0);
}

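/*
 * Install new general-purpose register state into a thread's trap frame.
 * Note that sstatus is not copied, so privileged status bits cannot be
 * changed this way.
 */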
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sepc = regs->sepc;
	frame->tf_ra = regs->ra;
	frame->tf_sp = regs->sp;
	frame->tf_gp = regs->gp;
	frame->tf_tp = regs->tp;

	memcpy(frame->tf_t, regs->t, sizeof(frame->tf_t));
	memcpy(frame->tf_s, regs->s, sizeof(frame->tf_s));
	memcpy(frame->tf_a, regs->a, sizeof(frame->tf_a));

	return (0);
}

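/* Copy the floating-point register state out of a thread's PCB. */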
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef FPE
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running FPE instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			fpe_state_save(td);

		memcpy(regs->fp_x, pcb->pcb_x, sizeof(regs->fp_x));
		regs->fp_fcsr = pcb->pcb_fcsr;
	} else
#endif
		memset(regs, 0, sizeof(*regs));

	return (0);
}

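/* Install new floating-point register state and mark the FP state clean. */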
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef FPE
	struct trapframe *frame;
	struct pcb *pcb;

	frame = td->td_frame;
	pcb = td->td_pcb;

	memcpy(pcb->pcb_x, regs->fp_x, sizeof(regs->fp_x));
	pcb->pcb_fcsr = regs->fp_fcsr;
	pcb->pcb_fpflags |= PCB_FP_STARTED;
	frame->tf_sstatus &= ~SSTATUS_FS_MASK;
	frame->tf_sstatus |= SSTATUS_FS_CLEAN;
#endif

	return (0);
}

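/* Hardware debug registers are not supported yet. */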
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_sepc = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	/* TODO: single-stepping is not yet implemented. */
	return (EOPNOTSUPP);
}

int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO: single-stepping is not yet implemented. */
	return (EOPNOTSUPP);
}

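/*
 * Reset the register state for a new executable image: clear the trap
 * frame, point sepc at the image entry point, and hand the new image its
 * aligned initial stack pointer, which is also passed in a0.
 */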
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_a[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_ra = imgp->entry_addr;
	tf->tf_sepc = imgp->entry_addr;

	pcb->pcb_fpflags &= ~PCB_FP_STARTED;
}

/*
 * Sanity check that these are the same size; they will be memcpy'd to
 * and fro.
 */
CTASSERT(sizeof(((struct trapframe *)0)->tf_a) ==
    sizeof((struct gpregs *)0)->gp_a);
CTASSERT(sizeof(((struct trapframe *)0)->tf_s) ==
    sizeof((struct gpregs *)0)->gp_s);
CTASSERT(sizeof(((struct trapframe *)0)->tf_t) ==
    sizeof((struct gpregs *)0)->gp_t);
CTASSERT(sizeof(((struct trapframe *)0)->tf_a) ==
    sizeof((struct reg *)0)->a);
CTASSERT(sizeof(((struct trapframe *)0)->tf_s) ==
    sizeof((struct reg *)0)->s);
CTASSERT(sizeof(((struct trapframe *)0)->tf_t) ==
    sizeof((struct reg *)0)->t);

/* Support for FDT configurations only. */
CTASSERT(FDT);

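/* Assemble an mcontext_t from the thread's current machine state. */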
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	memcpy(mcp->mc_gpregs.gp_t, tf->tf_t, sizeof(mcp->mc_gpregs.gp_t));
	memcpy(mcp->mc_gpregs.gp_s, tf->tf_s, sizeof(mcp->mc_gpregs.gp_s));
	memcpy(mcp->mc_gpregs.gp_a, tf->tf_a, sizeof(mcp->mc_gpregs.gp_a));

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_a[0] = 0;
		mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */
	}

	mcp->mc_gpregs.gp_ra = tf->tf_ra;
	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_gp = tf->tf_gp;
	mcp->mc_gpregs.gp_tp = tf->tf_tp;
	mcp->mc_gpregs.gp_sepc = tf->tf_sepc;
	mcp->mc_gpregs.gp_sstatus = tf->tf_sstatus;
	get_fpcontext(td, mcp);

	return (0);
}

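/*
 * Install a user-supplied mcontext_t, refusing any attempt to modify
 * privileged or read-only sstatus bits.
 */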
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf;

	tf = td->td_frame;

	/*
	 * Permit changes to the USTATUS bits of SSTATUS.
	 *
	 * Ignore writes to read-only bits (SD, XS).
	 *
	 * Ignore writes to the FS field as set_fpcontext() will set
	 * it explicitly.
	 */
	if (((mcp->mc_gpregs.gp_sstatus ^ tf->tf_sstatus) &
	    ~(SSTATUS_SD | SSTATUS_XS_MASK | SSTATUS_FS_MASK | SSTATUS_UPIE |
	    SSTATUS_UIE)) != 0)
		return (EINVAL);

	memcpy(tf->tf_t, mcp->mc_gpregs.gp_t, sizeof(tf->tf_t));
	memcpy(tf->tf_s, mcp->mc_gpregs.gp_s, sizeof(tf->tf_s));
	memcpy(tf->tf_a, mcp->mc_gpregs.gp_a, sizeof(tf->tf_a));

	tf->tf_ra = mcp->mc_gpregs.gp_ra;
	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_gp = mcp->mc_gpregs.gp_gp;
	tf->tf_sepc = mcp->mc_gpregs.gp_sepc;
	tf->tf_sstatus = mcp->mc_gpregs.gp_sstatus;
	set_fpcontext(td, mcp);

	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef FPE
	struct pcb *curpcb;

	critical_enter();

	curpcb = curthread->td_pcb;

	KASSERT(td->td_pcb == curpcb, ("Invalid fpe pcb"));

	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running FPE instructions we will
		 * need to save the state to memcpy it below.
		 */
		fpe_state_save(td);

		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
		    ("Non-userspace FPE flags set in get_fpcontext"));
		memcpy(mcp->mc_fpregs.fp_x, curpcb->pcb_x,
		    sizeof(mcp->mc_fpregs.fp_x));
		mcp->mc_fpregs.fp_fcsr = curpcb->pcb_fcsr;
		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
		mcp->mc_flags |= _MC_FP_VALID;
	}

	critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef FPE
	struct pcb *curpcb;
#endif

	td->td_frame->tf_sstatus &= ~SSTATUS_FS_MASK;
	td->td_frame->tf_sstatus |= SSTATUS_FS_OFF;

#ifdef FPE
	critical_enter();

	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;
		/* FPE usage is enabled, override registers. */
		memcpy(curpcb->pcb_x, mcp->mc_fpregs.fp_x,
		    sizeof(mcp->mc_fpregs.fp_x));
		curpcb->pcb_fcsr = mcp->mc_fpregs.fp_fcsr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
		td->td_frame->tf_sstatus |= SSTATUS_FS_CLEAN;
	}

	critical_exit();
#endif
}

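/*
 * Idle the current hart: if the run queue is empty, issue a fence followed
 * by wfi and wait for an interrupt.
 */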
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "fence \n"
		    "wfi   \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}

void
cpu_halt(void)
{

	/*
	 * Try to power down using the HSM SBI extension and fall back to a
	 * simple wfi loop.
	 */
	intr_disable();
	if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0)
		sbi_hsm_hart_stop();
	for (;;)
		__asm __volatile("wfi");
	/* NOTREACHED */
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	panic("cpu_est_clockrate");
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

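/*
 * Spinlock enter/exit: on the outermost acquisition, disable interrupts
 * and remember the previous interrupt-enable state so that it can be
 * restored when the last spinlock is dropped.
 */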
void
spinlock_enter(void)
{
	struct thread *td;
	register_t reg;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		reg = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_sstatus_ie = reg;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t sstatus_ie;

	td = curthread;
	sstatus_ie = td->td_md.md_saved_sstatus_ie;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(sstatus_ie);
	}
}

#ifndef	_SYS_SYSPROTO_H_
struct sigreturn_args {
	const ucontext_t *sigcntxp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s));

	pcb->pcb_ra = tf->tf_sepc;
	pcb->pcb_sp = tf->tf_sp;
	pcb->pcb_gp = tf->tf_gp;
	pcb->pcb_tp = tf->tf_tp;
}

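/*
 * Deliver a signal: build a sigframe containing the saved machine context
 * on the user stack (or the alternate signal stack), then redirect the
 * thread to the handler with the frame address in sp and the signal
 * number, siginfo pointer and ucontext pointer in a0-a2.
 */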
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe *fp, frame;
	struct sysentvec *sysent;
	struct trapframe *tf;
	struct sigacts *psp;
	struct thread *td;
	struct proc *p;
	int onstack;
	int sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_a[0] = sig;
	tf->tf_a[1] = (register_t)&fp->sf_si;
	tf->tf_a[2] = (register_t)&fp->sf_uc;

	tf->tf_sepc = (register_t)catcher;
	tf->tf_sp = (register_t)fp;

	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_ra = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_ra = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

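/* Attach the bootstrap kernel stack, PCB and trap frame to thread0. */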
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif

static void
cache_setup(void)
{

	/* TODO */

	dcache_line_size = 0;
	icache_line_size = 0;
	idcache_line_size = 0;
}

/*
 * Fake up a boot descriptor table.
 */
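/*
 * The table is a list of (uint32_t type, uint32_t size, payload) records,
 * terminated by a pair of zero words, mimicking what loader(8) would have
 * provided via preload_metadata.
 */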
static void
fake_preload_metadata(struct riscv_bootparams *rvbp)
{
	static uint32_t fake_preload[48];
	vm_offset_t lastaddr;
	size_t fake_size, dtb_size;

#define PRELOAD_PUSH_VALUE(type, value) do {			\
	*(type *)((char *)fake_preload + fake_size) = (value);	\
	fake_size += sizeof(type);				\
} while (0)

#define PRELOAD_PUSH_STRING(str) do {				\
	uint32_t ssize;						\
	ssize = strlen(str) + 1;				\
	PRELOAD_PUSH_VALUE(uint32_t, ssize);			\
	strcpy(((char *)fake_preload + fake_size), str);	\
	fake_size += ssize;					\
	fake_size = roundup(fake_size, sizeof(u_long));		\
} while (0)

	fake_size = 0;
	lastaddr = (vm_offset_t)&end;

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_NAME);
	PRELOAD_PUSH_STRING("kernel");
	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_TYPE);
	PRELOAD_PUSH_STRING("elf kernel");

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_ADDR);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(uint64_t, KERNBASE);

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_SIZE);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(size_t));
	PRELOAD_PUSH_VALUE(uint64_t, (size_t)((vm_offset_t)&end - KERNBASE));

	/* Copy the DTB to KVA space. */
	lastaddr = roundup(lastaddr, sizeof(int));
	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_DTBP);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);
	dtb_size = fdt_totalsize(rvbp->dtbp_virt);
	memmove((void *)lastaddr, (const void *)rvbp->dtbp_virt, dtb_size);
	lastaddr = roundup(lastaddr + dtb_size, sizeof(int));

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_KERNEND);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_HOWTO);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(int));
	PRELOAD_PUSH_VALUE(int, RB_VERBOSE);

	/* End marker */
	PRELOAD_PUSH_VALUE(uint32_t, 0);
	PRELOAD_PUSH_VALUE(uint32_t, 0);
	preload_metadata = (caddr_t)fake_preload;

	/* Check if bootloader clobbered part of the kernel with the DTB. */
	KASSERT(rvbp->dtbp_phys + dtb_size <= rvbp->kern_phys ||
		rvbp->dtbp_phys >= rvbp->kern_phys + (lastaddr - KERNBASE),
	    ("FDT (%lx-%lx) and kernel (%lx-%lx) overlap", rvbp->dtbp_phys,
		rvbp->dtbp_phys + dtb_size, rvbp->kern_phys,
		rvbp->kern_phys + (lastaddr - KERNBASE)));
	KASSERT(fake_size < sizeof(fake_preload),
	    ("Too many fake_preload items"));

	if (boothowto & RB_VERBOSE)
		printf("FDT phys (%lx-%lx), kernel phys (%lx-%lx)\n",
		    rvbp->dtbp_phys, rvbp->dtbp_phys + dtb_size,
		    rvbp->kern_phys, rvbp->kern_phys + (lastaddr - KERNBASE));
}

#ifdef FDT
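/*
 * Fetch the kernel command line from the /chosen node of the device tree
 * and fold any recognized boot flags into boothowto.
 */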
static void
parse_fdt_bootargs(void)
{
	char bootargs[512];

	bootargs[sizeof(bootargs) - 1] = '\0';
	if (fdt_get_chosen_bootargs(bootargs, sizeof(bootargs) - 1) == 0) {
		boothowto |= boot_parse_cmdline(bootargs);
	}
}
#endif

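/*
 * Walk the preloaded module metadata: pick up boothowto, the kernel
 * environment, the symbol table and the device tree, and return the first
 * free address after the loaded kernel.
 */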
static vm_offset_t
parse_metadata(void)
{
	caddr_t kmdp;
	vm_offset_t lastaddr;
#ifdef DDB
	vm_offset_t ksym_start, ksym_end;
#endif
	char *kern_envp;

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	KASSERT(kmdp != NULL, ("No preload metadata found!"));

	/* Read the boot metadata */
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	if (kern_envp != NULL)
		init_static_kenv(kern_envp, 0);
	else
		init_static_kenv(static_kenv, sizeof(static_kenv));
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif
#ifdef FDT
	try_load_dtb(kmdp);
	if (kern_envp == NULL)
		parse_fdt_bootargs();
#endif
	return (lastaddr);
}

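/*
 * Machine-dependent early boot entry point, called from locore with a
 * riscv_bootparams structure describing the kernel's physical placement,
 * page tables, boot stack and device tree.
 */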
void
initriscv(struct riscv_bootparams *rvbp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pcpu *pcpup;
	int mem_regions_sz;
	vm_offset_t lastaddr;
	vm_size_t kernlen;
#ifdef FDT
	phandle_t chosen;
	uint32_t hart;
#endif
	char *env;

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	/* Set the pcpu data; this is needed by pmap_bootstrap. */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/* Set the pcpu pointer */
	__asm __volatile("mv tp, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Initialize SBI interface. */
	sbi_init();

	/* Parse the boot metadata. */
	if (rvbp->modulep != 0) {
		preload_metadata = (caddr_t)rvbp->modulep;
	} else {
		fake_preload_metadata(rvbp);
	}
	lastaddr = parse_metadata();

#ifdef FDT
	/*
	 * Look for the boot hart ID. This was either passed in directly from
	 * the SBI firmware and handled by locore, or was stored in the device
	 * tree by an earlier boot stage.
	 */
	chosen = OF_finddevice("/chosen");
	if (OF_getencprop(chosen, "boot-hartid", &hart, sizeof(hart)) != -1) {
		boot_hart = hart;
	}
#endif
	if (boot_hart == BOOT_HART_INVALID) {
		panic("Boot hart ID was not properly set");
	}
	pcpup->pc_hart = boot_hart;

#ifdef FDT
	/*
	 * Exclude reserved memory specified by the device tree. Typically,
	 * this contains an entry for memory used by the runtime SBI firmware.
	 */
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0) {
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	}

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0) {
		panic("Cannot get physical memory regions");
	}
	physmem_hardware_regions(mem_regions, mem_regions_sz);
#endif

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	kernlen = (lastaddr - KERNBASE);
	pmap_bootstrap(rvbp->kern_l1pt, rvbp->kern_phys, kernlen);

#ifdef FDT
	/*
	 * XXX: Exclude the lowest 2MB of physical memory, if it hasn't been
	 * already, as this area is assumed to contain the SBI firmware. This
	 * is a little fragile, but it is consistent with the platforms we
	 * support so far.
	 *
	 * TODO: remove this when all regular booting methods properly
	 * report their reserved memory in the device tree.
	 */
	if (mem_regions[0].mr_start == physmap[0]) {
		physmem_exclude_region(mem_regions[0].mr_start, L2_SIZE,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	}
#endif
	physmem_init_kernel_globals();

	/* Establish static device mappings */
	devmap_bootstrap(0, NULL);

	cninit();

	/*
	 * Dump the boot metadata. We have to wait for cninit() since console
	 * output is required. If it's grossly incorrect the kernel will never
	 * make it this far.
	 */
	if (getenv_is_true("debug.dump_modinfo_at_boot"))
		preload_dump();

	init_proc0(rvbp->kern_stack);

	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);
	kdb_init();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (boothowto & RB_VERBOSE)
		physmem_print_tables();

	early_boot = 0;

	TSEXIT();
}

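/*
 * Simple byte-at-a-time bzero. The #undef guards against a macro version
 * of bzero being in scope here.
 */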
#undef bzero
void
bzero(void *buf, size_t len)
{
	uint8_t *p;

	p = buf;
	while (len-- > 0)
		*p++ = 0;
}