Deleted Added
full compact
machdep.c (317004) machdep.c (319202)
1/*-
2 * Copyright (c) 2014 Andrew Turner
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include "opt_platform.h"
29#include "opt_ddb.h"
30
31#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2014 Andrew Turner
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include "opt_platform.h"
29#include "opt_ddb.h"
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/machdep.c 317004 2017-04-16 07:21:20Z mmel $");
32__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/machdep.c 319202 2017-05-30 12:26:36Z andrew $");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/buf.h>
37#include <sys/bus.h>
38#include <sys/cons.h>
39#include <sys/cpu.h>
40#include <sys/devmap.h>
41#include <sys/efi.h>
42#include <sys/exec.h>
43#include <sys/imgact.h>
44#include <sys/kdb.h>
45#include <sys/kernel.h>
46#include <sys/limits.h>
47#include <sys/linker.h>
48#include <sys/msgbuf.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
51#include <sys/ptrace.h>
52#include <sys/reboot.h>
53#include <sys/rwlock.h>
54#include <sys/sched.h>
55#include <sys/signalvar.h>
56#include <sys/syscallsubr.h>
57#include <sys/sysent.h>
58#include <sys/sysproto.h>
59#include <sys/ucontext.h>
60#include <sys/vdso.h>
61
62#include <vm/vm.h>
63#include <vm/vm_kern.h>
64#include <vm/vm_object.h>
65#include <vm/vm_page.h>
66#include <vm/pmap.h>
67#include <vm/vm_map.h>
68#include <vm/vm_pager.h>
69
70#include <machine/armreg.h>
71#include <machine/cpu.h>
72#include <machine/debug_monitor.h>
73#include <machine/kdb.h>
74#include <machine/machdep.h>
75#include <machine/metadata.h>
76#include <machine/md_var.h>
77#include <machine/pcb.h>
78#include <machine/reg.h>
79#include <machine/vmparam.h>
80
81#ifdef VFP
82#include <machine/vfp.h>
83#endif
84
85#ifdef FDT
86#include <dev/fdt/fdt_common.h>
87#include <dev/ofw/openfirm.h>
88#endif
89
90struct pcpu __pcpu[MAXCPU];
91
92static struct trapframe proc0_tf;
93
94vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
95vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];
96
97int early_boot = 1;
98int cold = 1;
99long realmem = 0;
100long Maxmem = 0;
101
102#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
103vm_paddr_t physmap[PHYSMAP_SIZE];
104u_int physmap_idx;
105
106struct kva_md_info kmi;
107
108int64_t dcache_line_size; /* The minimum D cache line size */
109int64_t icache_line_size; /* The minimum I cache line size */
110int64_t idcache_line_size; /* The minimum cache line size */
111int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/buf.h>
37#include <sys/bus.h>
38#include <sys/cons.h>
39#include <sys/cpu.h>
40#include <sys/devmap.h>
41#include <sys/efi.h>
42#include <sys/exec.h>
43#include <sys/imgact.h>
44#include <sys/kdb.h>
45#include <sys/kernel.h>
46#include <sys/limits.h>
47#include <sys/linker.h>
48#include <sys/msgbuf.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
51#include <sys/ptrace.h>
52#include <sys/reboot.h>
53#include <sys/rwlock.h>
54#include <sys/sched.h>
55#include <sys/signalvar.h>
56#include <sys/syscallsubr.h>
57#include <sys/sysent.h>
58#include <sys/sysproto.h>
59#include <sys/ucontext.h>
60#include <sys/vdso.h>
61
62#include <vm/vm.h>
63#include <vm/vm_kern.h>
64#include <vm/vm_object.h>
65#include <vm/vm_page.h>
66#include <vm/pmap.h>
67#include <vm/vm_map.h>
68#include <vm/vm_pager.h>
69
70#include <machine/armreg.h>
71#include <machine/cpu.h>
72#include <machine/debug_monitor.h>
73#include <machine/kdb.h>
74#include <machine/machdep.h>
75#include <machine/metadata.h>
76#include <machine/md_var.h>
77#include <machine/pcb.h>
78#include <machine/reg.h>
79#include <machine/vmparam.h>
80
81#ifdef VFP
82#include <machine/vfp.h>
83#endif
84
85#ifdef FDT
86#include <dev/fdt/fdt_common.h>
87#include <dev/ofw/openfirm.h>
88#endif
89
90struct pcpu __pcpu[MAXCPU];
91
92static struct trapframe proc0_tf;
93
94vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
95vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];
96
97int early_boot = 1;
98int cold = 1;
99long realmem = 0;
100long Maxmem = 0;
101
102#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
103vm_paddr_t physmap[PHYSMAP_SIZE];
104u_int physmap_idx;
105
106struct kva_md_info kmi;
107
108int64_t dcache_line_size; /* The minimum D cache line size */
109int64_t icache_line_size; /* The minimum I cache line size */
110int64_t idcache_line_size; /* The minimum cache line size */
111int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */
112int has_pan;
112
113/* pagezero_* implementations are provided in support.S */
114void pagezero_simple(void *);
115void pagezero_cache(void *);
116
117/* pagezero_simple is default pagezero */
118void (*pagezero)(void *p) = pagezero_simple;
119
120static void
113
114/* pagezero_* implementations are provided in support.S */
115void pagezero_simple(void *);
116void pagezero_cache(void *);
117
118/* pagezero_simple is default pagezero */
119void (*pagezero)(void *p) = pagezero_simple;
120
121static void
122pan_setup(void)
123{
124 uint64_t id_aa64mfr1;
125
126 id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
127 if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
128 has_pan = 1;
129}
130
131void
132pan_enable(void)
133{
134
135 /*
136 * The LLVM integrated assembler doesn't understand the PAN
137 * PSTATE field. Because of this we need to manually create
138 * the instruction in an asm block. This is equivalent to:
139 * msr pan, #1
140 *
141 * This sets the PAN bit, stopping the kernel from accessing
142 * memory when userspace can also access it unless the kernel
143 * uses the userspace load/store instructions.
144 */
145 if (has_pan) {
146 WRITE_SPECIALREG(sctlr_el1,
147 READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
148 __asm __volatile(".inst 0xd500409f | (0x1 << 8)");
149 }
150}
151
152static void
121cpu_startup(void *dummy)
122{
123
124 identify_cpu();
125
126 vm_ksubmap_init(&kmi);
127 bufinit();
128 vm_pager_bufferinit();
129}
130
131SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
132
133int
134cpu_idle_wakeup(int cpu)
135{
136
137 return (0);
138}
139
140int
141fill_regs(struct thread *td, struct reg *regs)
142{
143 struct trapframe *frame;
144
145 frame = td->td_frame;
146 regs->sp = frame->tf_sp;
147 regs->lr = frame->tf_lr;
148 regs->elr = frame->tf_elr;
149 regs->spsr = frame->tf_spsr;
150
151 memcpy(regs->x, frame->tf_x, sizeof(regs->x));
152
153 return (0);
154}
155
156int
157set_regs(struct thread *td, struct reg *regs)
158{
159 struct trapframe *frame;
160
161 frame = td->td_frame;
162 frame->tf_sp = regs->sp;
163 frame->tf_lr = regs->lr;
164 frame->tf_elr = regs->elr;
165 frame->tf_spsr = regs->spsr;
166
167 memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
168
169 return (0);
170}
171
172int
173fill_fpregs(struct thread *td, struct fpreg *regs)
174{
175#ifdef VFP
176 struct pcb *pcb;
177
178 pcb = td->td_pcb;
179 if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
180 /*
181 * If we have just been running VFP instructions we will
182 * need to save the state to memcpy it below.
183 */
184 vfp_save_state(td, pcb);
185
186 memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
187 regs->fp_cr = pcb->pcb_fpcr;
188 regs->fp_sr = pcb->pcb_fpsr;
189 } else
190#endif
191 memset(regs->fp_q, 0, sizeof(regs->fp_q));
192 return (0);
193}
194
195int
196set_fpregs(struct thread *td, struct fpreg *regs)
197{
198#ifdef VFP
199 struct pcb *pcb;
200
201 pcb = td->td_pcb;
202 memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
203 pcb->pcb_fpcr = regs->fp_cr;
204 pcb->pcb_fpsr = regs->fp_sr;
205#endif
206 return (0);
207}
208
209int
210fill_dbregs(struct thread *td, struct dbreg *regs)
211{
212
213 panic("ARM64TODO: fill_dbregs");
214}
215
216int
217set_dbregs(struct thread *td, struct dbreg *regs)
218{
219
220 panic("ARM64TODO: set_dbregs");
221}
222
223int
224ptrace_set_pc(struct thread *td, u_long addr)
225{
226
227 panic("ARM64TODO: ptrace_set_pc");
228 return (0);
229}
230
231int
232ptrace_single_step(struct thread *td)
233{
234
235 td->td_frame->tf_spsr |= PSR_SS;
236 td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
237 return (0);
238}
239
240int
241ptrace_clear_single_step(struct thread *td)
242{
243
244 td->td_frame->tf_spsr &= ~PSR_SS;
245 td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
246 return (0);
247}
248
249void
250exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
251{
252 struct trapframe *tf = td->td_frame;
253
254 memset(tf, 0, sizeof(struct trapframe));
255
256 /*
257 * We need to set x0 for init as it doesn't call
258 * cpu_set_syscall_retval to copy the value. We also
259 * need to set td_retval for the cases where we do.
260 */
261 tf->tf_x[0] = td->td_retval[0] = stack;
262 tf->tf_sp = STACKALIGN(stack);
263 tf->tf_lr = imgp->entry_addr;
264 tf->tf_elr = imgp->entry_addr;
265}
266
267/* Sanity check these are the same size, they will be memcpy'd to and fro */
268CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
269 sizeof((struct gpregs *)0)->gp_x);
270CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
271 sizeof((struct reg *)0)->x);
272
273int
274get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
275{
276 struct trapframe *tf = td->td_frame;
277
278 if (clear_ret & GET_MC_CLEAR_RET) {
279 mcp->mc_gpregs.gp_x[0] = 0;
280 mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
281 } else {
282 mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
283 mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
284 }
285
286 memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
287 sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
288
289 mcp->mc_gpregs.gp_sp = tf->tf_sp;
290 mcp->mc_gpregs.gp_lr = tf->tf_lr;
291 mcp->mc_gpregs.gp_elr = tf->tf_elr;
292
293 return (0);
294}
295
296int
297set_mcontext(struct thread *td, mcontext_t *mcp)
298{
299 struct trapframe *tf = td->td_frame;
300
301 memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
302
303 tf->tf_sp = mcp->mc_gpregs.gp_sp;
304 tf->tf_lr = mcp->mc_gpregs.gp_lr;
305 tf->tf_elr = mcp->mc_gpregs.gp_elr;
306 tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
307
308 return (0);
309}
310
311static void
312get_fpcontext(struct thread *td, mcontext_t *mcp)
313{
314#ifdef VFP
315 struct pcb *curpcb;
316
317 critical_enter();
318
319 curpcb = curthread->td_pcb;
320
321 if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
322 /*
323 * If we have just been running VFP instructions we will
324 * need to save the state to memcpy it below.
325 */
326 vfp_save_state(td, curpcb);
327
328 memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
329 sizeof(mcp->mc_fpregs));
330 mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
331 mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
332 mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
333 mcp->mc_flags |= _MC_FP_VALID;
334 }
335
336 critical_exit();
337#endif
338}
339
340static void
341set_fpcontext(struct thread *td, mcontext_t *mcp)
342{
343#ifdef VFP
344 struct pcb *curpcb;
345
346 critical_enter();
347
348 if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
349 curpcb = curthread->td_pcb;
350
351 /*
352 * Discard any vfp state for the current thread, we
353 * are about to override it.
354 */
355 vfp_discard(td);
356
357 memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
358 sizeof(mcp->mc_fpregs));
359 curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
360 curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
361 curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
362 }
363
364 critical_exit();
365#endif
366}
367
368void
369cpu_idle(int busy)
370{
371
372 spinlock_enter();
373 if (!busy)
374 cpu_idleclock();
375 if (!sched_runnable())
376 __asm __volatile(
377 "dsb sy \n"
378 "wfi \n");
379 if (!busy)
380 cpu_activeclock();
381 spinlock_exit();
382}
383
384void
385cpu_halt(void)
386{
387
388 /* We should have shutdown by now, if not enter a low power sleep */
389 intr_disable();
390 while (1) {
391 __asm __volatile("wfi");
392 }
393}
394
395/*
396 * Flush the D-cache for non-DMA I/O so that the I-cache can
397 * be made coherent later.
398 */
399void
400cpu_flush_dcache(void *ptr, size_t len)
401{
402
403 /* ARM64TODO TBD */
404}
405
406/* Get current clock frequency for the given CPU ID. */
407int
408cpu_est_clockrate(int cpu_id, uint64_t *rate)
409{
410 struct pcpu *pc;
411
412 pc = pcpu_find(cpu_id);
413 if (pc == NULL || rate == NULL)
414 return (EINVAL);
415
416 if (pc->pc_clock == 0)
417 return (EOPNOTSUPP);
418
419 *rate = pc->pc_clock;
420 return (0);
421}
422
423void
424cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
425{
426
427 pcpu->pc_acpi_id = 0xffffffff;
428}
429
430void
431spinlock_enter(void)
432{
433 struct thread *td;
434 register_t daif;
435
436 td = curthread;
437 if (td->td_md.md_spinlock_count == 0) {
438 daif = intr_disable();
439 td->td_md.md_spinlock_count = 1;
440 td->td_md.md_saved_daif = daif;
441 } else
442 td->td_md.md_spinlock_count++;
443 critical_enter();
444}
445
446void
447spinlock_exit(void)
448{
449 struct thread *td;
450 register_t daif;
451
452 td = curthread;
453 critical_exit();
454 daif = td->td_md.md_saved_daif;
455 td->td_md.md_spinlock_count--;
456 if (td->td_md.md_spinlock_count == 0)
457 intr_restore(daif);
458}
459
460#ifndef _SYS_SYSPROTO_H_
461struct sigreturn_args {
462 ucontext_t *ucp;
463};
464#endif
465
466int
467sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
468{
469 ucontext_t uc;
470 uint32_t spsr;
471
472 if (uap == NULL)
473 return (EFAULT);
474 if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
475 return (EFAULT);
476
477 spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
478 if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
479 (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
480 return (EINVAL);
481
482 set_mcontext(td, &uc.uc_mcontext);
483 set_fpcontext(td, &uc.uc_mcontext);
484
485 /* Restore signal mask. */
486 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
487
488 return (EJUSTRETURN);
489}
490
491/*
492 * Construct a PCB from a trapframe. This is called from kdb_trap() where
493 * we want to start a backtrace from the function that caused us to enter
494 * the debugger. We have the context in the trapframe, but base the trace
495 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
496 * enough for a backtrace.
497 */
498void
499makectx(struct trapframe *tf, struct pcb *pcb)
500{
501 int i;
502
503 for (i = 0; i < PCB_LR; i++)
504 pcb->pcb_x[i] = tf->tf_x[i];
505
506 pcb->pcb_x[PCB_LR] = tf->tf_lr;
507 pcb->pcb_pc = tf->tf_elr;
508 pcb->pcb_sp = tf->tf_sp;
509}
510
511void
512sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
513{
514 struct thread *td;
515 struct proc *p;
516 struct trapframe *tf;
517 struct sigframe *fp, frame;
518 struct sigacts *psp;
519 struct sysentvec *sysent;
520 int code, onstack, sig;
521
522 td = curthread;
523 p = td->td_proc;
524 PROC_LOCK_ASSERT(p, MA_OWNED);
525
526 sig = ksi->ksi_signo;
527 code = ksi->ksi_code;
528 psp = p->p_sigacts;
529 mtx_assert(&psp->ps_mtx, MA_OWNED);
530
531 tf = td->td_frame;
532 onstack = sigonstack(tf->tf_sp);
533
534 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
535 catcher, sig);
536
537 /* Allocate and validate space for the signal handler context. */
538 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
539 SIGISMEMBER(psp->ps_sigonstack, sig)) {
540 fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
541 td->td_sigstk.ss_size);
542#if defined(COMPAT_43)
543 td->td_sigstk.ss_flags |= SS_ONSTACK;
544#endif
545 } else {
546 fp = (struct sigframe *)td->td_frame->tf_sp;
547 }
548
549 /* Make room, keeping the stack aligned */
550 fp--;
551 fp = (struct sigframe *)STACKALIGN(fp);
552
553 /* Fill in the frame to copy out */
554 get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
555 get_fpcontext(td, &frame.sf_uc.uc_mcontext);
556 frame.sf_si = ksi->ksi_info;
557 frame.sf_uc.uc_sigmask = *mask;
558 frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
559 ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
560 frame.sf_uc.uc_stack = td->td_sigstk;
561 mtx_unlock(&psp->ps_mtx);
562 PROC_UNLOCK(td->td_proc);
563
564 /* Copy the sigframe out to the user's stack. */
565 if (copyout(&frame, fp, sizeof(*fp)) != 0) {
566 /* Process has trashed its stack. Kill it. */
567 CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
568 PROC_LOCK(p);
569 sigexit(td, SIGILL);
570 }
571
572 tf->tf_x[0]= sig;
573 tf->tf_x[1] = (register_t)&fp->sf_si;
574 tf->tf_x[2] = (register_t)&fp->sf_uc;
575
576 tf->tf_elr = (register_t)catcher;
577 tf->tf_sp = (register_t)fp;
578 sysent = p->p_sysent;
579 if (sysent->sv_sigcode_base != 0)
580 tf->tf_lr = (register_t)sysent->sv_sigcode_base;
581 else
582 tf->tf_lr = (register_t)(sysent->sv_psstrings -
583 *(sysent->sv_szsigcode));
584
585 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
586 tf->tf_sp);
587
588 PROC_LOCK(p);
589 mtx_lock(&psp->ps_mtx);
590}
591
592static void
593init_proc0(vm_offset_t kstack)
594{
595 struct pcpu *pcpup = &__pcpu[0];
596
597 proc_linkup0(&proc0, &thread0);
598 thread0.td_kstack = kstack;
599 thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
600 thread0.td_pcb->pcb_fpflags = 0;
601 thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
602 thread0.td_frame = &proc0_tf;
603 pcpup->pc_curpcb = thread0.td_pcb;
604}
605
606typedef struct {
607 uint32_t type;
608 uint64_t phys_start;
609 uint64_t virt_start;
610 uint64_t num_pages;
611 uint64_t attr;
612} EFI_MEMORY_DESCRIPTOR;
613
614static int
615add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
616 u_int *physmap_idxp)
617{
618 u_int i, insert_idx, _physmap_idx;
619
620 _physmap_idx = *physmap_idxp;
621
622 if (length == 0)
623 return (1);
624
625 /*
626 * Find insertion point while checking for overlap. Start off by
627 * assuming the new entry will be added to the end.
628 */
629 insert_idx = _physmap_idx;
630 for (i = 0; i <= _physmap_idx; i += 2) {
631 if (base < physmap[i + 1]) {
632 if (base + length <= physmap[i]) {
633 insert_idx = i;
634 break;
635 }
636 if (boothowto & RB_VERBOSE)
637 printf(
638 "Overlapping memory regions, ignoring second region\n");
639 return (1);
640 }
641 }
642
643 /* See if we can prepend to the next entry. */
644 if (insert_idx <= _physmap_idx &&
645 base + length == physmap[insert_idx]) {
646 physmap[insert_idx] = base;
647 return (1);
648 }
649
650 /* See if we can append to the previous entry. */
651 if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
652 physmap[insert_idx - 1] += length;
653 return (1);
654 }
655
656 _physmap_idx += 2;
657 *physmap_idxp = _physmap_idx;
658 if (_physmap_idx == PHYSMAP_SIZE) {
659 printf(
660 "Too many segments in the physical address map, giving up\n");
661 return (0);
662 }
663
664 /*
665 * Move the last 'N' entries down to make room for the new
666 * entry if needed.
667 */
668 for (i = _physmap_idx; i > insert_idx; i -= 2) {
669 physmap[i] = physmap[i - 2];
670 physmap[i + 1] = physmap[i - 1];
671 }
672
673 /* Insert the new entry. */
674 physmap[insert_idx] = base;
675 physmap[insert_idx + 1] = base + length;
676 return (1);
677}
678
679#ifdef FDT
680static void
681add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
682 u_int *physmap_idxp)
683{
684
685 for (int i = 0; i < mrcnt; i++) {
686 if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
687 physmap_idxp))
688 break;
689 }
690}
691#endif
692
693static void
694add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
695 u_int *physmap_idxp)
696{
697 struct efi_md *map, *p;
698 const char *type;
699 size_t efisz;
700 int ndesc, i;
701
702 static const char *types[] = {
703 "Reserved",
704 "LoaderCode",
705 "LoaderData",
706 "BootServicesCode",
707 "BootServicesData",
708 "RuntimeServicesCode",
709 "RuntimeServicesData",
710 "ConventionalMemory",
711 "UnusableMemory",
712 "ACPIReclaimMemory",
713 "ACPIMemoryNVS",
714 "MemoryMappedIO",
715 "MemoryMappedIOPortSpace",
716 "PalCode",
717 "PersistentMemory"
718 };
719
720 /*
721 * Memory map data provided by UEFI via the GetMemoryMap
722 * Boot Services API.
723 */
724 efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
725 map = (struct efi_md *)((uint8_t *)efihdr + efisz);
726
727 if (efihdr->descriptor_size == 0)
728 return;
729 ndesc = efihdr->memory_size / efihdr->descriptor_size;
730
731 if (boothowto & RB_VERBOSE)
732 printf("%23s %12s %12s %8s %4s\n",
733 "Type", "Physical", "Virtual", "#Pages", "Attr");
734
735 for (i = 0, p = map; i < ndesc; i++,
736 p = efi_next_descriptor(p, efihdr->descriptor_size)) {
737 if (boothowto & RB_VERBOSE) {
738 if (p->md_type < nitems(types))
739 type = types[p->md_type];
740 else
741 type = "<INVALID>";
742 printf("%23s %012lx %12p %08lx ", type, p->md_phys,
743 p->md_virt, p->md_pages);
744 if (p->md_attr & EFI_MD_ATTR_UC)
745 printf("UC ");
746 if (p->md_attr & EFI_MD_ATTR_WC)
747 printf("WC ");
748 if (p->md_attr & EFI_MD_ATTR_WT)
749 printf("WT ");
750 if (p->md_attr & EFI_MD_ATTR_WB)
751 printf("WB ");
752 if (p->md_attr & EFI_MD_ATTR_UCE)
753 printf("UCE ");
754 if (p->md_attr & EFI_MD_ATTR_WP)
755 printf("WP ");
756 if (p->md_attr & EFI_MD_ATTR_RP)
757 printf("RP ");
758 if (p->md_attr & EFI_MD_ATTR_XP)
759 printf("XP ");
760 if (p->md_attr & EFI_MD_ATTR_NV)
761 printf("NV ");
762 if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
763 printf("MORE_RELIABLE ");
764 if (p->md_attr & EFI_MD_ATTR_RO)
765 printf("RO ");
766 if (p->md_attr & EFI_MD_ATTR_RT)
767 printf("RUNTIME");
768 printf("\n");
769 }
770
771 switch (p->md_type) {
772 case EFI_MD_TYPE_CODE:
773 case EFI_MD_TYPE_DATA:
774 case EFI_MD_TYPE_BS_CODE:
775 case EFI_MD_TYPE_BS_DATA:
776 case EFI_MD_TYPE_FREE:
777 /*
778 * We're allowed to use any entry with these types.
779 */
780 break;
781 default:
782 continue;
783 }
784
785 if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
786 physmap, physmap_idxp))
787 break;
788 }
789}
790
791#ifdef FDT
792static void
793try_load_dtb(caddr_t kmdp)
794{
795 vm_offset_t dtbp;
796
797 dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
798 if (dtbp == (vm_offset_t)NULL) {
799 printf("ERROR loading DTB\n");
800 return;
801 }
802
803 if (OF_install(OFW_FDT, 0) == FALSE)
804 panic("Cannot install FDT");
805
806 if (OF_init((void *)dtbp) != 0)
807 panic("OF_init failed with the found device tree");
808}
809#endif
810
811static void
812cache_setup(void)
813{
814 int dcache_line_shift, icache_line_shift, dczva_line_shift;
815 uint32_t ctr_el0;
816 uint32_t dczid_el0;
817
818 ctr_el0 = READ_SPECIALREG(ctr_el0);
819
820 /* Read the log2 words in each D cache line */
821 dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
822 /* Get the D cache line size */
823 dcache_line_size = sizeof(int) << dcache_line_shift;
824
825 /* And the same for the I cache */
826 icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
827 icache_line_size = sizeof(int) << icache_line_shift;
828
829 idcache_line_size = MIN(dcache_line_size, icache_line_size);
830
831 dczid_el0 = READ_SPECIALREG(dczid_el0);
832
833 /* Check if dc zva is not prohibited */
834 if (dczid_el0 & DCZID_DZP)
835 dczva_line_size = 0;
836 else {
837 /* Same as with above calculations */
838 dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
839 dczva_line_size = sizeof(int) << dczva_line_shift;
840
841 /* Change pagezero function */
842 pagezero = pagezero_cache;
843 }
844}
845
846void
847initarm(struct arm64_bootparams *abp)
848{
849 struct efi_map_header *efihdr;
850 struct pcpu *pcpup;
851#ifdef FDT
852 struct mem_region mem_regions[FDT_MEM_REGIONS];
853 int mem_regions_sz;
854#endif
855 vm_offset_t lastaddr;
856 caddr_t kmdp;
857 vm_paddr_t mem_len;
858 int i;
859
860 /* Set the module data location */
861 preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
862
863 /* Find the kernel address */
864 kmdp = preload_search_by_type("elf kernel");
865 if (kmdp == NULL)
866 kmdp = preload_search_by_type("elf64 kernel");
867
868 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
869 init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);
870
871#ifdef FDT
872 try_load_dtb(kmdp);
873#endif
874
875 /* Find the address to start allocating from */
876 lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
877
878 /* Load the physical memory ranges */
879 physmap_idx = 0;
880 efihdr = (struct efi_map_header *)preload_search_info(kmdp,
881 MODINFO_METADATA | MODINFOMD_EFI_MAP);
882 if (efihdr != NULL)
883 add_efi_map_entries(efihdr, physmap, &physmap_idx);
884#ifdef FDT
885 else {
886 /* Grab physical memory regions information from device tree. */
887 if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
888 NULL) != 0)
889 panic("Cannot get physical memory regions");
890 add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
891 &physmap_idx);
892 }
893#endif
894
895 /* Print the memory map */
896 mem_len = 0;
897 for (i = 0; i < physmap_idx; i += 2) {
898 dump_avail[i] = physmap[i];
899 dump_avail[i + 1] = physmap[i + 1];
900 mem_len += physmap[i + 1] - physmap[i];
901 }
902 dump_avail[i] = 0;
903 dump_avail[i + 1] = 0;
904
905 /* Set the pcpu data, this is needed by pmap_bootstrap */
906 pcpup = &__pcpu[0];
907 pcpu_init(pcpup, 0, sizeof(struct pcpu));
908
909 /*
910 * Set the pcpu pointer with a backup in tpidr_el1 to be
911 * loaded when entering the kernel from userland.
912 */
913 __asm __volatile(
914 "mov x18, %0 \n"
915 "msr tpidr_el1, %0" :: "r"(pcpup));
916
917 PCPU_SET(curthread, &thread0);
918
919 /* Do basic tuning, hz etc */
920 init_param1();
921
922 cache_setup();
153cpu_startup(void *dummy)
154{
155
156 identify_cpu();
157
158 vm_ksubmap_init(&kmi);
159 bufinit();
160 vm_pager_bufferinit();
161}
162
163SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
164
165int
166cpu_idle_wakeup(int cpu)
167{
168
169 return (0);
170}
171
172int
173fill_regs(struct thread *td, struct reg *regs)
174{
175 struct trapframe *frame;
176
177 frame = td->td_frame;
178 regs->sp = frame->tf_sp;
179 regs->lr = frame->tf_lr;
180 regs->elr = frame->tf_elr;
181 regs->spsr = frame->tf_spsr;
182
183 memcpy(regs->x, frame->tf_x, sizeof(regs->x));
184
185 return (0);
186}
187
188int
189set_regs(struct thread *td, struct reg *regs)
190{
191 struct trapframe *frame;
192
193 frame = td->td_frame;
194 frame->tf_sp = regs->sp;
195 frame->tf_lr = regs->lr;
196 frame->tf_elr = regs->elr;
197 frame->tf_spsr = regs->spsr;
198
199 memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
200
201 return (0);
202}
203
204int
205fill_fpregs(struct thread *td, struct fpreg *regs)
206{
207#ifdef VFP
208 struct pcb *pcb;
209
210 pcb = td->td_pcb;
211 if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
212 /*
213 * If we have just been running VFP instructions we will
214 * need to save the state to memcpy it below.
215 */
216 vfp_save_state(td, pcb);
217
218 memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
219 regs->fp_cr = pcb->pcb_fpcr;
220 regs->fp_sr = pcb->pcb_fpsr;
221 } else
222#endif
223 memset(regs->fp_q, 0, sizeof(regs->fp_q));
224 return (0);
225}
226
227int
228set_fpregs(struct thread *td, struct fpreg *regs)
229{
230#ifdef VFP
231 struct pcb *pcb;
232
233 pcb = td->td_pcb;
234 memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
235 pcb->pcb_fpcr = regs->fp_cr;
236 pcb->pcb_fpsr = regs->fp_sr;
237#endif
238 return (0);
239}
240
241int
242fill_dbregs(struct thread *td, struct dbreg *regs)
243{
244
245 panic("ARM64TODO: fill_dbregs");
246}
247
248int
249set_dbregs(struct thread *td, struct dbreg *regs)
250{
251
252 panic("ARM64TODO: set_dbregs");
253}
254
255int
256ptrace_set_pc(struct thread *td, u_long addr)
257{
258
259 panic("ARM64TODO: ptrace_set_pc");
260 return (0);
261}
262
263int
264ptrace_single_step(struct thread *td)
265{
266
267 td->td_frame->tf_spsr |= PSR_SS;
268 td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
269 return (0);
270}
271
272int
273ptrace_clear_single_step(struct thread *td)
274{
275
276 td->td_frame->tf_spsr &= ~PSR_SS;
277 td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
278 return (0);
279}
280
281void
282exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
283{
284 struct trapframe *tf = td->td_frame;
285
286 memset(tf, 0, sizeof(struct trapframe));
287
288 /*
289 * We need to set x0 for init as it doesn't call
290 * cpu_set_syscall_retval to copy the value. We also
291 * need to set td_retval for the cases where we do.
292 */
293 tf->tf_x[0] = td->td_retval[0] = stack;
294 tf->tf_sp = STACKALIGN(stack);
295 tf->tf_lr = imgp->entry_addr;
296 tf->tf_elr = imgp->entry_addr;
297}
298
299/* Sanity check these are the same size, they will be memcpy'd to and fro */
300CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
301 sizeof((struct gpregs *)0)->gp_x);
302CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
303 sizeof((struct reg *)0)->x);
304
305int
306get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
307{
308 struct trapframe *tf = td->td_frame;
309
310 if (clear_ret & GET_MC_CLEAR_RET) {
311 mcp->mc_gpregs.gp_x[0] = 0;
312 mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
313 } else {
314 mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
315 mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
316 }
317
318 memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
319 sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
320
321 mcp->mc_gpregs.gp_sp = tf->tf_sp;
322 mcp->mc_gpregs.gp_lr = tf->tf_lr;
323 mcp->mc_gpregs.gp_elr = tf->tf_elr;
324
325 return (0);
326}
327
/*
 * Install the machine context's general-purpose register state into the
 * thread's trapframe.  Always returns 0.
 *
 * NOTE(review): gp_spsr is installed without validation here.  The
 * sigreturn path checks it before calling us; verify that every other
 * caller (e.g. the setcontext(2) path) performs the same check.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

	return (0);
}
342
343static void
344get_fpcontext(struct thread *td, mcontext_t *mcp)
345{
346#ifdef VFP
347 struct pcb *curpcb;
348
349 critical_enter();
350
351 curpcb = curthread->td_pcb;
352
353 if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
354 /*
355 * If we have just been running VFP instructions we will
356 * need to save the state to memcpy it below.
357 */
358 vfp_save_state(td, curpcb);
359
360 memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
361 sizeof(mcp->mc_fpregs));
362 mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
363 mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
364 mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
365 mcp->mc_flags |= _MC_FP_VALID;
366 }
367
368 critical_exit();
369#endif
370}
371
372static void
373set_fpcontext(struct thread *td, mcontext_t *mcp)
374{
375#ifdef VFP
376 struct pcb *curpcb;
377
378 critical_enter();
379
380 if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
381 curpcb = curthread->td_pcb;
382
383 /*
384 * Discard any vfp state for the current thread, we
385 * are about to override it.
386 */
387 vfp_discard(td);
388
389 memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
390 sizeof(mcp->mc_fpregs));
391 curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
392 curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
393 curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
394 }
395
396 critical_exit();
397#endif
398}
399
/*
 * Idle the CPU until the next interrupt.  spinlock_enter() masks
 * interrupts at the PSTATE level; a pending interrupt still terminates
 * "wfi" (architecturally WFI wakes on pending, even masked, interrupts),
 * so there is no lost-wakeup window between the sched_runnable() check
 * and the wait.  The "dsb sy" orders prior memory accesses before the
 * low-power wait.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();	/* switch the clock to idle mode */
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
415
/*
 * Final halt path: interrupts off, then park the CPU forever in the
 * lowest-power wait available.
 */
void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
426
427/*
428 * Flush the D-cache for non-DMA I/O so that the I-cache can
429 * be made coherent later.
430 */
431void
432cpu_flush_dcache(void *ptr, size_t len)
433{
434
435 /* ARM64TODO TBD */
436}
437
438/* Get current clock frequency for the given CPU ID. */
439int
440cpu_est_clockrate(int cpu_id, uint64_t *rate)
441{
442 struct pcpu *pc;
443
444 pc = pcpu_find(cpu_id);
445 if (pc == NULL || rate == NULL)
446 return (EINVAL);
447
448 if (pc->pc_clock == 0)
449 return (EOPNOTSUPP);
450
451 *rate = pc->pc_clock;
452 return (0);
453}
454
/*
 * MD per-CPU initialization hook.  0xffffffff presumably marks the
 * ACPI processor id as "not yet discovered" -- confirm against the
 * ACPI MADT parsing code that later fills pc_acpi_id in.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}
461
/*
 * Enter a spin-lock section.  On the outermost entry interrupts are
 * disabled and the previous DAIF mask is stashed in the MD thread
 * area for spinlock_exit() to restore; nested entries only bump the
 * count.  The order matters: interrupts must be off before the count
 * is published, so the section is never observed interruptible.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
477
/*
 * Leave a spin-lock section; the mirror of spinlock_enter().  The
 * saved DAIF mask is read before the count is decremented, and
 * interrupts are only restored when the outermost section ends.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	critical_exit();
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(daif);
}
491
492#ifndef _SYS_SYSPROTO_H_
493struct sigreturn_args {
494 ucontext_t *ucp;
495};
496#endif
497
/*
 * sigreturn(2): restore the register and FP context saved by sendsig()
 * after a signal handler returns.  Returns EJUSTRETURN on success so
 * the syscall return path leaves the freshly-restored trapframe alone.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	uint32_t spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/*
	 * Refuse to restore a PSTATE that would leave userland in a
	 * privileged mode or with exceptions masked: the mode must be
	 * EL0t and all DAIF mask bits must be clear.
	 */
	spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
		return (EINVAL);

	set_mcontext(td, &uc.uc_mcontext);
	set_fpcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
522
523/*
524 * Construct a PCB from a trapframe. This is called from kdb_trap() where
525 * we want to start a backtrace from the function that caused us to enter
526 * the debugger. We have the context in the trapframe, but base the trace
527 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
528 * enough for a backtrace.
529 */
530void
531makectx(struct trapframe *tf, struct pcb *pcb)
532{
533 int i;
534
535 for (i = 0; i < PCB_LR; i++)
536 pcb->pcb_x[i] = tf->tf_x[i];
537
538 pcb->pcb_x[PCB_LR] = tf->tf_lr;
539 pcb->pcb_pc = tf->tf_elr;
540 pcb->pcb_sp = tf->tf_sp;
541}
542
543void
544sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
545{
546 struct thread *td;
547 struct proc *p;
548 struct trapframe *tf;
549 struct sigframe *fp, frame;
550 struct sigacts *psp;
551 struct sysentvec *sysent;
552 int code, onstack, sig;
553
554 td = curthread;
555 p = td->td_proc;
556 PROC_LOCK_ASSERT(p, MA_OWNED);
557
558 sig = ksi->ksi_signo;
559 code = ksi->ksi_code;
560 psp = p->p_sigacts;
561 mtx_assert(&psp->ps_mtx, MA_OWNED);
562
563 tf = td->td_frame;
564 onstack = sigonstack(tf->tf_sp);
565
566 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
567 catcher, sig);
568
569 /* Allocate and validate space for the signal handler context. */
570 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
571 SIGISMEMBER(psp->ps_sigonstack, sig)) {
572 fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
573 td->td_sigstk.ss_size);
574#if defined(COMPAT_43)
575 td->td_sigstk.ss_flags |= SS_ONSTACK;
576#endif
577 } else {
578 fp = (struct sigframe *)td->td_frame->tf_sp;
579 }
580
581 /* Make room, keeping the stack aligned */
582 fp--;
583 fp = (struct sigframe *)STACKALIGN(fp);
584
585 /* Fill in the frame to copy out */
586 get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
587 get_fpcontext(td, &frame.sf_uc.uc_mcontext);
588 frame.sf_si = ksi->ksi_info;
589 frame.sf_uc.uc_sigmask = *mask;
590 frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
591 ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
592 frame.sf_uc.uc_stack = td->td_sigstk;
593 mtx_unlock(&psp->ps_mtx);
594 PROC_UNLOCK(td->td_proc);
595
596 /* Copy the sigframe out to the user's stack. */
597 if (copyout(&frame, fp, sizeof(*fp)) != 0) {
598 /* Process has trashed its stack. Kill it. */
599 CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
600 PROC_LOCK(p);
601 sigexit(td, SIGILL);
602 }
603
604 tf->tf_x[0]= sig;
605 tf->tf_x[1] = (register_t)&fp->sf_si;
606 tf->tf_x[2] = (register_t)&fp->sf_uc;
607
608 tf->tf_elr = (register_t)catcher;
609 tf->tf_sp = (register_t)fp;
610 sysent = p->p_sysent;
611 if (sysent->sv_sigcode_base != 0)
612 tf->tf_lr = (register_t)sysent->sv_sigcode_base;
613 else
614 tf->tf_lr = (register_t)(sysent->sv_psstrings -
615 *(sysent->sv_szsigcode));
616
617 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
618 tf->tf_sp);
619
620 PROC_LOCK(p);
621 mtx_lock(&psp->ps_mtx);
622}
623
/*
 * Wire up proc0/thread0: the PCB lives at the top of the kernel stack
 * (immediately below kstack end), the FP flags start clear, and the
 * boot CPU's curpcb pointer is primed.  pcb_vfpcpu = UINT_MAX
 * presumably means "no CPU owns the VFP state" -- confirm in vfp.c.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
637
/*
 * Local mirror of the UEFI memory descriptor layout (see the UEFI
 * spec's GetMemoryMap()).  NOTE(review): this typedef appears unused
 * in this file -- add_efi_map_entries() works on struct efi_md
 * instead; consider removing it.
 */
typedef struct {
	uint32_t type;
	uint64_t phys_start;
	uint64_t virt_start;
	uint64_t num_pages;
	uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;
645
/*
 * Insert the physical range [base, base + length) into physmap[], an
 * ascending array of {start, end} pairs; *physmap_idxp is the index of
 * the first unused slot pair.  Adjacent ranges are merged; overlapping
 * ranges are dropped with a warning.  Returns 1 when the caller may
 * keep adding ranges (including the benign "ignored" cases) and 0 once
 * the table is full.
 */
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	u_int i, insert_idx, _physmap_idx;

	_physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.  (With an
	 * empty map the unused slots are zero, so the scan falls through.)
	 */
	insert_idx = _physmap_idx;
	for (i = 0; i <= _physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
	"Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= _physmap_idx &&
	    base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	_physmap_idx += 2;
	*physmap_idxp = _physmap_idx;
	if (_physmap_idx == PHYSMAP_SIZE) {
		printf(
	"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = _physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}
710
711#ifdef FDT
712static void
713add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
714 u_int *physmap_idxp)
715{
716
717 for (int i = 0; i < mrcnt; i++) {
718 if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
719 physmap_idxp))
720 break;
721 }
722}
723#endif
724
/*
 * Walk the UEFI memory map handed over by the loader, optionally dump
 * it (boot -v), and record every usable entry in physmap[] via
 * add_physmap_entry().  Only conventional memory and the loader/boot
 * services regions are treated as usable RAM.
 */
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	/* Printable names indexed by EFI memory type. */
	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode",
		"PersistentMemory"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.  The descriptors start at the next
	 * 16-byte boundary after the header.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	/* Guard against a division by zero on a malformed header. */
	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	/*
	 * Step by descriptor_size, not sizeof(*p): the firmware's
	 * descriptor may be larger than our struct.
	 */
	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type < nitems(types))
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_NV)
				printf("NV ");
			if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
				printf("MORE_RELIABLE ");
			if (p->md_attr & EFI_MD_ATTR_RO)
				printf("RO ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idxp))
			break;
	}
}
822
823#ifdef FDT
/*
 * Locate the device tree blob the loader stashed in the module
 * metadata and hand it to the Open Firmware FDT layer.  A missing DTB
 * is survivable (boot may still use the EFI map); a broken one is not.
 */
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
841#endif
842
/*
 * Derive the cache line sizes from CTR_EL0 and the "dc zva" block size
 * from DCZID_EL0.  The CTR fields encode log2 of the line size in
 * 4-byte words, hence the "sizeof(int) << shift" conversions.  When
 * DC ZVA is permitted, switch pagezero to the cache-assisted variant.
 */
static void
cache_setup(void)
{
	int dcache_line_shift, icache_line_shift, dczva_line_shift;
	uint32_t ctr_el0;
	uint32_t dczid_el0;

	ctr_el0 = READ_SPECIALREG(ctr_el0);

	/* Read the log2 words in each D cache line */
	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
	/* Get the D cache line size */
	dcache_line_size = sizeof(int) << dcache_line_shift;

	/* And the same for the I cache */
	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
	icache_line_size = sizeof(int) << icache_line_shift;

	/* Conservative line size for operating on both caches at once. */
	idcache_line_size = MIN(dcache_line_size, icache_line_size);

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* Same as with above calculations */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
877
878void
879initarm(struct arm64_bootparams *abp)
880{
881 struct efi_map_header *efihdr;
882 struct pcpu *pcpup;
883#ifdef FDT
884 struct mem_region mem_regions[FDT_MEM_REGIONS];
885 int mem_regions_sz;
886#endif
887 vm_offset_t lastaddr;
888 caddr_t kmdp;
889 vm_paddr_t mem_len;
890 int i;
891
892 /* Set the module data location */
893 preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
894
895 /* Find the kernel address */
896 kmdp = preload_search_by_type("elf kernel");
897 if (kmdp == NULL)
898 kmdp = preload_search_by_type("elf64 kernel");
899
900 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
901 init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);
902
903#ifdef FDT
904 try_load_dtb(kmdp);
905#endif
906
907 /* Find the address to start allocating from */
908 lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
909
910 /* Load the physical memory ranges */
911 physmap_idx = 0;
912 efihdr = (struct efi_map_header *)preload_search_info(kmdp,
913 MODINFO_METADATA | MODINFOMD_EFI_MAP);
914 if (efihdr != NULL)
915 add_efi_map_entries(efihdr, physmap, &physmap_idx);
916#ifdef FDT
917 else {
918 /* Grab physical memory regions information from device tree. */
919 if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
920 NULL) != 0)
921 panic("Cannot get physical memory regions");
922 add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
923 &physmap_idx);
924 }
925#endif
926
927 /* Print the memory map */
928 mem_len = 0;
929 for (i = 0; i < physmap_idx; i += 2) {
930 dump_avail[i] = physmap[i];
931 dump_avail[i + 1] = physmap[i + 1];
932 mem_len += physmap[i + 1] - physmap[i];
933 }
934 dump_avail[i] = 0;
935 dump_avail[i + 1] = 0;
936
937 /* Set the pcpu data, this is needed by pmap_bootstrap */
938 pcpup = &__pcpu[0];
939 pcpu_init(pcpup, 0, sizeof(struct pcpu));
940
941 /*
942 * Set the pcpu pointer with a backup in tpidr_el1 to be
943 * loaded when entering the kernel from userland.
944 */
945 __asm __volatile(
946 "mov x18, %0 \n"
947 "msr tpidr_el1, %0" :: "r"(pcpup));
948
949 PCPU_SET(curthread, &thread0);
950
951 /* Do basic tuning, hz etc */
952 init_param1();
953
954 cache_setup();
955 pan_setup();
923
924 /* Bootstrap enough of pmap to enter the kernel proper */
925 pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
926 KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
927
928 devmap_bootstrap(0, NULL);
929
930 cninit();
931
932 init_proc0(abp->kern_stack);
933 msgbufinit(msgbufp, msgbufsize);
934 mutex_init();
935 init_param2(physmem);
936
937 dbg_monitor_init();
938 kdb_init();
956
957 /* Bootstrap enough of pmap to enter the kernel proper */
958 pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
959 KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
960
961 devmap_bootstrap(0, NULL);
962
963 cninit();
964
965 init_proc0(abp->kern_stack);
966 msgbufinit(msgbufp, msgbufsize);
967 mutex_init();
968 init_param2(physmem);
969
970 dbg_monitor_init();
971 kdb_init();
972 pan_enable();
939
940 early_boot = 0;
941}
942
943#ifdef DDB
944#include <ddb/ddb.h>
945
/*
 * "show specialregs": dump the interesting EL0/EL1 system registers
 * from the debugger.  Purely informational; reads only.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1020
/*
 * "show vtop <virt_addr>": translate a virtual address using the
 * AT S1E1R/S1E1W instructions and print the resulting PAR_EL1 values
 * for both read and write permission checks.
 */
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	uint64_t phys;

	if (have_addr) {
		phys = arm64_address_translate_s1e1r(addr);
		db_printf("Physical address reg (read): 0x%016lx\n", phys);
		phys = arm64_address_translate_s1e1w(addr);
		db_printf("Physical address reg (write): 0x%016lx\n", phys);
	} else
		db_printf("show vtop <virt_addr>\n");
}
1033#endif
/*
 * Removed: stale diff-merge residue -- an orphaned copy of the tail of
 * initarm() (statements outside any function) and a second, duplicate
 * definition of the DDB "specialregs" and "vtop" commands, which would
 * not compile.
 */