/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/armreg.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/pcb.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

_Static_assert(sizeof(mcontext_t) == 880, "mcontext_t size incorrect");
_Static_assert(sizeof(ucontext_t) == 960, "ucontext_t size incorrect");
_Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect");

static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

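/*
 * Copy the general purpose register state of the thread out of its trap
 * frame, e.g. for ptrace(PT_GETREGS) or a core dump.
 */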
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sp = frame->tf_sp;
	regs->lr = frame->tf_lr;
	regs->elr = frame->tf_elr;
	regs->spsr = frame->tf_spsr;

	memcpy(regs->x, frame->tf_x, sizeof(regs->x));

#ifdef COMPAT_FREEBSD32
	/*
	 * We may be called here for a 32-bit process, if we're using a
	 * 64-bit debugger. If so, put the PC and SPSR where it expects them.
	 */
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		regs->x[15] = frame->tf_elr;
		regs->x[16] = frame->tf_spsr;
	}
#endif
	return (0);
}

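/*
 * Install a new general purpose register state in the thread's trap frame,
 * only letting the userspace-settable PSTATE bits through.
 */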
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sp = regs->sp;
	frame->tf_lr = regs->lr;

	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * We may be called for a 32-bit process if we're using
		 * a 64-bit debugger. If so, get the PC and SPSR from where
		 * it put them.
		 */
		frame->tf_elr = regs->x[15];
		frame->tf_spsr &= ~PSR_SETTABLE_32;
		frame->tf_spsr |= regs->x[16] & PSR_SETTABLE_32;
		/*
		 * Don't allow userspace to ask to continue single stepping.
		 * The SPSR.SS field doesn't exist when the saved state is
		 * AArch32; as the SPSR.DIT field has moved into its place,
		 * don't allow userspace to set the SPSR.SS field.
		 */
	} else
#endif
	{
		frame->tf_elr = regs->elr;
		/*
		 * frame->tf_spsr and regs->spsr were 32-bit on FreeBSD 13
		 * and are 64-bit from FreeBSD 14. As PSR_SETTABLE_64 clears
		 * the upper 32 bits no compatibility handling is needed;
		 * however, if this is ever not the case we will need to add
		 * it, similar to how it is done in set_mcontext.
		 */
		frame->tf_spsr &= ~PSR_SETTABLE_64;
		frame->tf_spsr |= regs->spsr & PSR_SETTABLE_64;
		/* Enable single stepping if userspace asked for it */
		if ((frame->tf_spsr & PSR_SS) != 0) {
			td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;

			WRITE_SPECIALREG(mdscr_el1,
			    READ_SPECIALREG(mdscr_el1) | MDSCR_SS);
			isb();
		}
	}
	return (0);
}

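/*
 * Copy the VFP/SIMD register state out of the PCB, first saving the live
 * hardware state if the thread has been using the VFP.
 */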
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);
	}

	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called fill_fpregs while the kernel is using the VFP"));
	memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
	    sizeof(regs->fp_q));
	regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
	regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
#else
	memset(regs, 0, sizeof(*regs));
#endif
	return (0);
}

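/*
 * Overwrite the saved VFP/SIMD register state in the PCB with 'regs'.
 */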
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called set_fpregs while the kernel is using the VFP"));
	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
	return (0);
}

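/*
 * Export the hardware debug register state kept in the PCB, along with the
 * number of breakpoints and watchpoints the CPU implements.
 */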
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	int i;
	uint8_t debug_ver, nbkpts, nwtpts;

	memset(regs, 0, sizeof(*regs));

	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_DebugVer_SHIFT,
	    &debug_ver);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_BRPs_SHIFT,
	    &nbkpts);
	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_WRPs_SHIFT,
	    &nwtpts);

	/*
	 * The BRPs field contains the number of breakpoints - 1. Armv8-A
	 * allows the hardware to provide 2-16 breakpoints so this won't
	 * overflow an 8-bit value. The same applies to the WRPs field.
	 */
	nbkpts++;
	nwtpts++;

	regs->db_debug_ver = debug_ver;
	regs->db_nbkpts = nbkpts;
	regs->db_nwtpts = nwtpts;

	monitor = &td->td_pcb->pcb_dbg_regs;
	if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
		for (i = 0; i < nbkpts; i++) {
			regs->db_breakregs[i].dbr_addr = monitor->dbg_bvr[i];
			regs->db_breakregs[i].dbr_ctrl = monitor->dbg_bcr[i];
		}
		for (i = 0; i < nwtpts; i++) {
			regs->db_watchregs[i].dbw_addr = monitor->dbg_wvr[i];
			regs->db_watchregs[i].dbw_ctrl = monitor->dbg_wcr[i];
		}
	}

	return (0);
}

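/*
 * Validate and install user-supplied hardware breakpoints and watchpoints.
 * Only unlinked debug registers targeting EL0 on user addresses are accepted.
 */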
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	struct debug_monitor_state *monitor;
	uint64_t addr;
	uint32_t ctrl;
	int i;

	monitor = &td->td_pcb->pcb_dbg_regs;
	monitor->dbg_enable_count = 0;

	for (i = 0; i < DBG_BRP_MAX; i++) {
		addr = regs->db_breakregs[i].dbr_addr;
		ctrl = regs->db_breakregs[i].dbr_ctrl;

		/*
		 * Don't let the user set a breakpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * The lowest 2 bits are ignored, so record the effective
		 * address.
		 */
		addr = rounddown2(addr, 4);

		/*
		 * Some control fields are ignored, and other bits are
		 * reserved. Only unlinked, address-matching breakpoints are
		 * supported.
		 *
		 * XXX: fields that appear unvalidated, such as BAS, have
		 * constrained undefined behaviour. If the user mis-programs
		 * these, there is no risk to the system.
		 */
		ctrl &= DBGBCR_EN | DBGBCR_PMC | DBGBCR_BAS;
		if ((ctrl & DBGBCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBGBCR_PMC) != DBGBCR_PMC_EL0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}

		monitor->dbg_bvr[i] = addr;
		monitor->dbg_bcr[i] = ctrl;
	}

	for (i = 0; i < DBG_WRP_MAX; i++) {
		addr = regs->db_watchregs[i].dbw_addr;
		ctrl = regs->db_watchregs[i].dbw_ctrl;

		/*
		 * Don't let the user set a watchpoint on a kernel or
		 * non-canonical user address.
		 */
		if (addr >= VM_MAXUSER_ADDRESS)
			return (EINVAL);

		/*
		 * Some control fields are ignored, and other bits are
		 * reserved. Only unlinked watchpoints are supported.
		 */
		ctrl &= DBGWCR_EN | DBGWCR_PAC | DBGWCR_LSC | DBGWCR_BAS |
		    DBGWCR_MASK;

		if ((ctrl & DBGWCR_EN) != 0) {
			/* Only target EL0. */
			if ((ctrl & DBGWCR_PAC) != DBGWCR_PAC_EL0)
				return (EINVAL);

			/* Must set at least one of the load/store bits. */
			if ((ctrl & DBGWCR_LSC) == 0)
				return (EINVAL);

			/*
			 * When specifying the address range with BAS, the MASK
			 * field must be zero.
			 */
			if ((ctrl & DBGWCR_BAS) != DBGWCR_BAS &&
			    (ctrl & DBGWCR_MASK) != 0)
				return (EINVAL);

			monitor->dbg_enable_count++;
		}
		monitor->dbg_wvr[i] = addr;
		monitor->dbg_wcr[i] = ctrl;
	}

	if (monitor->dbg_enable_count > 0)
		monitor->dbg_flags |= DBGMON_ENABLED;

	return (0);
}

#ifdef COMPAT_FREEBSD32
int
fill_regs32(struct thread *td, struct reg32 *regs)
{
	int i;
	struct trapframe *tf;

	tf = td->td_frame;
	for (i = 0; i < 13; i++)
		regs->r[i] = tf->tf_x[i];
	/* For arm32, SP is r13 and LR is r14 */
	regs->r_sp = tf->tf_x[13];
	regs->r_lr = tf->tf_x[14];
	regs->r_pc = tf->tf_elr;
	regs->r_cpsr = tf->tf_spsr;

	return (0);
}

int
set_regs32(struct thread *td, struct reg32 *regs)
{
	int i;
	struct trapframe *tf;

	tf = td->td_frame;
	for (i = 0; i < 13; i++)
		tf->tf_x[i] = regs->r[i];
	/* For arm32, SP is r13 and LR is r14 */
	tf->tf_x[13] = regs->r_sp;
	tf->tf_x[14] = regs->r_lr;
	tf->tf_elr = regs->r_pc;
	tf->tf_spsr &= ~PSR_SETTABLE_32;
	tf->tf_spsr |= regs->r_cpsr & PSR_SETTABLE_32;

	return (0);
}

/* XXX fill/set dbregs/fpregs are stubbed on 32-bit arm. */
int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	memset(regs, 0, sizeof(*regs));
	return (0);
}

int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	return (0);
}

int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	memset(regs, 0, sizeof(*regs));
	return (0);
}

int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	return (0);
}
#endif

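/*
 * Set up the register state for a thread returning to userspace in a newly
 * exec'd image: clear the trap frame, point the stack and entry registers at
 * the new image and drop any per-thread state left over from the old one.
 */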
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_x[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;

	td->td_pcb->pcb_tpidr_el0 = 0;
	td->td_pcb->pcb_tpidrro_el0 = 0;
	WRITE_SPECIALREG(tpidrro_el0, 0);
	WRITE_SPECIALREG(tpidr_el0, 0);

#ifdef VFP
	vfp_reset_state(td, pcb);
#endif

	/*
	 * Clear debug register state. It is not applicable to the new process.
	 */
	bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));

	/* Generate new pointer authentication keys */
	ptrauth_exec(td);
}

/* Sanity check these are the same size; they will be memcpy'd to and from. */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

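/*
 * Fill in the machine context of the thread. GET_MC_CLEAR_RET tells us to
 * clear x0 and the carry flag so the saved context reports a successful
 * system call when it is resumed.
 */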
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;
	get_fpcontext(td, mcp);

	return (0);
}

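/*
 * Install the machine context from 'mcp', checking that the saved PSTATE
 * describes an AArch64 EL0 state and walking any extra register contexts
 * chained from mc_ptr.
 */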
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
#define	PSR_13_MASK	0xfffffffful
	struct arm64_reg_context ctx;
	struct trapframe *tf = td->td_frame;
	uint64_t spsr;
	vm_offset_t addr;
	int error;
	bool done;

	spsr = mcp->mc_gpregs.gp_spsr;
#ifdef COMPAT_FREEBSD13
	if (td->td_proc->p_osrel < P_OSREL_ARM64_SPSR) {
		/*
		 * Before FreeBSD 14 gp_spsr was 32-bit. The size of mc_gpregs
		 * was identical because of padding, so mask off the upper
		 * bits that may be invalid on earlier releases.
		 */
		spsr &= PSR_13_MASK;
	}
#endif

	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & PSR_AARCH32) != 0 ||
	    (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
#ifdef COMPAT_FREEBSD13
	if (td->td_proc->p_osrel < P_OSREL_ARM64_SPSR) {
		/* Keep the upper 32 bits of spsr on older releases */
		tf->tf_spsr &= ~PSR_13_MASK;
		tf->tf_spsr |= spsr;
	} else
#endif
		tf->tf_spsr = spsr;
	if ((tf->tf_spsr & PSR_SS) != 0) {
		td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;

		WRITE_SPECIALREG(mdscr_el1,
		    READ_SPECIALREG(mdscr_el1) | MDSCR_SS);
		isb();
	}

	set_fpcontext(td, mcp);

	/* Read any register contexts we find */
	if (mcp->mc_ptr != 0) {
		addr = mcp->mc_ptr;

		done = false;
		do {
			if (!__is_aligned(addr,
			    _Alignof(struct arm64_reg_context)))
				return (EINVAL);

			error = copyin((const void *)addr, &ctx, sizeof(ctx));
			if (error != 0)
				return (error);

			switch (ctx.ctx_id) {
			case ARM64_CTX_END:
				done = true;
				break;
			default:
				return (EINVAL);
			}

			addr += ctx.ctx_size;
		} while (!done);
	}

	return (0);
#undef PSR_13_MASK
}

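/*
 * Save the current thread's VFP state into the machine context.
 */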
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	MPASS(td == curthread);

	curpcb = curthread->td_pcb;
	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		vfp_save_state(td, curpcb);
	}

	KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
	    ("Called get_fpcontext while the kernel is using the VFP"));
	KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
	    ("Non-userspace FPU flags set in get_fpcontext"));
	memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
	    sizeof(mcp->mc_fpregs.fp_q));
	mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
	mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
	mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
	mcp->mc_flags |= _MC_FP_VALID;
#endif
}

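/*
 * Restore the VFP state from the machine context, discarding whatever state
 * is currently live in the hardware.
 */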
static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
	struct pcb *curpcb;

	MPASS(td == curthread);
	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;

		/*
		 * Discard any VFP state for the current thread; we are
		 * about to override it.
		 */
		critical_enter();
		vfp_discard(td);
		critical_exit();

		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
		    ("Called set_fpcontext while the kernel is using the VFP"));
		memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
		    sizeof(mcp->mc_fpregs.fp_q));
		curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
		curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
	}
#endif
}

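/*
 * sigreturn(2): restore the machine context and signal mask saved by
 * sendsig() when the signal handler returns.
 */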
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

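/*
 * Write the ARM64_CTX_END record that terminates the register context list
 * onto the user stack at '*addrp'.
 */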
static bool
sendsig_ctx_end(struct thread *td, vm_offset_t *addrp)
{
	struct arm64_reg_context end_ctx;
	vm_offset_t ctx_addr;

	*addrp -= sizeof(end_ctx);
	ctx_addr = *addrp;

	memset(&end_ctx, 0, sizeof(end_ctx));
	end_ctx.ctx_id = ARM64_CTX_END;
	end_ctx.ctx_size = sizeof(end_ctx);

	if (copyout(&end_ctx, (void *)ctx_addr, sizeof(end_ctx)) != 0)
		return (false);

	return (true);
}

typedef bool(*ctx_func)(struct thread *, vm_offset_t *);
static const ctx_func ctx_funcs[] = {
	sendsig_ctx_end,	/* Must be first to end the linked list */
	NULL,
};

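/*
 * Deliver a signal to the process: build a sigframe on the user or alternate
 * signal stack and adjust the trap frame so the thread enters the signal
 * trampoline with the handler's arguments in place.
 */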
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	vm_offset_t addr;
	int onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		addr = ((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		addr = td->td_frame->tf_sp;
	}

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	for (int i = 0; ctx_funcs[i] != NULL; i++) {
		if (!ctx_funcs[i](td, &addr)) {
			/* Process has trashed its stack. Kill it. */
			CTR4(KTR_SIG,
			    "sendsig: frame sigexit td=%p fp=%#lx func[%d]=%p",
			    td, addr, i, ctx_funcs[i]);
			PROC_LOCK(p);
			sigexit(td, SIGILL);
			/* NOTREACHED */
		}
	}

	/* Point at the first context */
	frame.sf_uc.uc_mcontext.mc_ptr = addr;

	/* Make room, keeping the stack aligned */
	fp = (struct sigframe *)addr;
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_x[0] = sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;
	tf->tf_x[8] = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	tf->tf_elr = (register_t)PROC_SIGCODE(p);

	/* Clear the single step flag while in the signal handler */
	if ((td->td_pcb->pcb_flags & PCB_SINGLE_STEP) != 0) {
		td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
		WRITE_SPECIALREG(mdscr_el1,
		    READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS);
		isb();
	}

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}