/*	$OpenBSD: trap.c,v 1.166 2024/04/14 03:26:25 jsg Exp $	*/

/*
 * Copyright (c) 1998-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* #define TRAPDEBUG */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syscall.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/user.h>
#include <sys/syscall_mi.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>

#ifdef DDB
#ifdef TRAPDEBUG
#include <ddb/db_output.h>
#else
#include <machine/db_machdep.h>
#endif
#endif

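/*
 * Classify the faulting instruction: return nonzero for store-type
 * instructions (stores, coprocessor stores, stby, and ldcw, which also
 * writes memory), so a data fault on it can be reported as a write
 * rather than a read access.
 */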
static __inline int
inst_store(u_int ins)
{
	return (ins & 0xf0000000) == 0x60000000 ||	/* st */
	       (ins & 0xf4000200) == 0x24000200 ||	/* fst/cst */
	       (ins & 0xfc000200) == 0x0c000200 ||	/* stby */
	       (ins & 0xfc0003c0) == 0x0c0001c0;	/* ldcw */
}

int	pcxs_unaligned(u_int opcode, vaddr_t va);
#ifdef PTRACE
void	ss_clear_breakpoints(struct proc *p);
#endif

void	ast(struct proc *);

/* single-step breakpoint */
#define SSBREAKPOINT	(HPPA_BREAK_KERNEL | (HPPA_BREAK_SS << 13))

const char *trap_type[] = {
	"invalid",
	"HPMC",
	"power failure",
	"recovery counter",
	"external interrupt",
	"LPMC",
	"ITLB miss fault",
	"instruction protection",
	"Illegal instruction",
	"break instruction",
	"privileged operation",
	"privileged register",
	"overflow",
	"conditional",
	"assist exception",
	"DTLB miss",
	"ITLB non-access miss",
	"DTLB non-access miss",
	"data protection/rights/alignment",
	"data break",
	"TLB dirty",
	"page reference",
	"assist emulation",
	"higher-priv transfer",
	"lower-priv transfer",
	"taken branch",
	"data access rights",
	"data protection",
	"unaligned data ref",
};
int trap_types = sizeof(trap_type)/sizeof(trap_type[0]);

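/*
 * Map a general register number (0-31) to its word index within
 * struct trapframe, so that trapped instructions can be emulated by
 * reading and writing the saved register image.
 */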
#define	frame_regmap(tf,r)	(((u_int *)(tf))[hppa_regmap[(r)]])
u_char hppa_regmap[32] = {
	offsetof(struct trapframe, tf_pad[0]) / 4,	/* r0 XXX */
	offsetof(struct trapframe, tf_r1) / 4,
	offsetof(struct trapframe, tf_rp) / 4,
	offsetof(struct trapframe, tf_r3) / 4,
	offsetof(struct trapframe, tf_r4) / 4,
	offsetof(struct trapframe, tf_r5) / 4,
	offsetof(struct trapframe, tf_r6) / 4,
	offsetof(struct trapframe, tf_r7) / 4,
	offsetof(struct trapframe, tf_r8) / 4,
	offsetof(struct trapframe, tf_r9) / 4,
	offsetof(struct trapframe, tf_r10) / 4,
	offsetof(struct trapframe, tf_r11) / 4,
	offsetof(struct trapframe, tf_r12) / 4,
	offsetof(struct trapframe, tf_r13) / 4,
	offsetof(struct trapframe, tf_r14) / 4,
	offsetof(struct trapframe, tf_r15) / 4,
	offsetof(struct trapframe, tf_r16) / 4,
	offsetof(struct trapframe, tf_r17) / 4,
	offsetof(struct trapframe, tf_r18) / 4,
	offsetof(struct trapframe, tf_t4) / 4,
	offsetof(struct trapframe, tf_t3) / 4,
	offsetof(struct trapframe, tf_t2) / 4,
	offsetof(struct trapframe, tf_t1) / 4,
	offsetof(struct trapframe, tf_arg3) / 4,
	offsetof(struct trapframe, tf_arg2) / 4,
	offsetof(struct trapframe, tf_arg1) / 4,
	offsetof(struct trapframe, tf_arg0) / 4,
	offsetof(struct trapframe, tf_dp) / 4,
	offsetof(struct trapframe, tf_ret0) / 4,
	offsetof(struct trapframe, tf_ret1) / 4,
	offsetof(struct trapframe, tf_sp) / 4,
	offsetof(struct trapframe, tf_r31) / 4,
};

void
ast(struct proc *p)
{
	if (p->p_md.md_astpending) {
		p->p_md.md_astpending = 0;
		uvmexp.softs++;
		mi_ast(p, curcpu()->ci_want_resched);
	}

}

void
trap(int type, struct trapframe *frame)
{
	struct proc *p = curproc;
	vaddr_t va;
	struct vm_map *map;
	struct vmspace *vm;
	register vm_prot_t access_type;
	register pa_space_t space;
	union sigval sv;
	u_int opcode;
	int ret, trapnum;
	const char *tts;
#ifdef DIAGNOSTIC
	int oldcpl = curcpu()->ci_cpl;
#endif

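	/*
	 * Instruction-side faults take their address from the front of
	 * the instruction queues; data-side faults use the interruption
	 * offset/space registers, and the faulting opcode distinguishes
	 * reads from writes.
	 */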
	trapnum = type & ~T_USER;
	opcode = frame->tf_iir;
	if (trapnum <= T_EXCEPTION || trapnum == T_HIGHERPL ||
	    trapnum == T_LOWERPL || trapnum == T_TAKENBR ||
	    trapnum == T_IDEBUG || trapnum == T_PERFMON) {
		va = frame->tf_iioq_head;
		space = frame->tf_iisq_head;
		access_type = PROT_EXEC;
	} else {
		va = frame->tf_ior;
		space = frame->tf_isr;
		if (va == frame->tf_iioq_head)
			access_type = PROT_EXEC;
		else if (inst_store(opcode))
			access_type = PROT_WRITE;
		else
			access_type = PROT_READ;
	}

	if (frame->tf_flags & TFF_LAST)
		p->p_md.md_regs = frame;

	if (trapnum > trap_types)
		tts = "reserved";
	else
		tts = trap_type[trapnum];

#ifdef TRAPDEBUG
	if (trapnum != T_INTERRUPT && trapnum != T_IBREAK)
		db_printf("trap: %x, %s for %x:%x at %x:%x, fl=%x, fp=%p\n",
		    type, tts, space, va, frame->tf_iisq_head,
		    frame->tf_iioq_head, frame->tf_flags, frame);
	else if (trapnum == T_IBREAK)
		db_printf("trap: break instruction %x:%x at %x:%x, fp=%p\n",
		    break5(opcode), break13(opcode),
		    frame->tf_iisq_head, frame->tf_iioq_head, frame);

	{
		extern int etext;
		if (frame < (struct trapframe *)&etext) {
			printf("trap: bogus frame ptr %p\n", frame);
			goto dead_end;
		}
	}
#endif
	if (trapnum != T_INTERRUPT) {
		uvmexp.traps++;
		mtctl(frame->tf_eiem, CR_EIEM);
	}

	if (type & T_USER)
		refreshcreds(p);

	switch (type) {
	case T_NONEXIST:
	case T_NONEXIST | T_USER:
		/* we've got screwed up by the central scrutinizer */
		printf("trap: elvis has just left the building!\n");
		goto dead_end;

	case T_RECOVERY:
	case T_RECOVERY | T_USER:
		/* XXX will implement later */
		printf("trap: handicapped");
		goto dead_end;

#ifdef DIAGNOSTIC
	case T_EXCEPTION:
		panic("FPU/SFU emulation botch");

		/* these just can't happen ever */
	case T_PRIV_OP:
	case T_PRIV_REG:
		/* these just can't make it to the trap() ever */
	case T_HPMC:
	case T_HPMC | T_USER:
#endif
	case T_IBREAK:
	case T_DATALIGN:
	case T_DBREAK:
	dead_end:
#ifdef DDB
		if (db_ktrap(type, va, frame)) {
			if (type == T_IBREAK) {
				/* skip break instruction */
				frame->tf_iioq_head = frame->tf_iioq_tail;
				frame->tf_iioq_tail += 4;
			}
			return;
		}
#else
		if (type == T_DATALIGN || type == T_DPROT)
			panic("trap: %s at 0x%lx", tts, va);
		else
			panic("trap: no debugger for \"%s\" (%d)", tts, type);
#endif
		break;

	case T_IBREAK | T_USER:
	case T_DBREAK | T_USER: {
		int code = TRAP_BRKPT;

#ifdef PTRACE
		KERNEL_LOCK();
		ss_clear_breakpoints(p);
		if (opcode == SSBREAKPOINT)
			code = TRAP_TRACE;
		KERNEL_UNLOCK();
#endif
		/* pass to user debugger */
		sv.sival_int = va;
		trapsignal(p, SIGTRAP, type & ~T_USER, code, sv);
		}
		break;

#ifdef PTRACE
	case T_TAKENBR | T_USER:
		KERNEL_LOCK();
		ss_clear_breakpoints(p);
		KERNEL_UNLOCK();
		/* pass to user debugger */
		sv.sival_int = va;
		trapsignal(p, SIGTRAP, type & ~T_USER, TRAP_TRACE, sv);
		break;
#endif

	case T_EXCEPTION | T_USER: {
		struct hppa_fpstate *hfp;
		u_int64_t *fpp;
		u_int32_t *pex;
		int i, flt;

		hfp = (struct hppa_fpstate *)frame->tf_cr30;
		fpp = (u_int64_t *)&hfp->hfp_regs;

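		/* Scan the FP exception registers for a pending exception. */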
		pex = (u_int32_t *)&fpp[0];
		for (i = 0, pex++; i < 7 && !*pex; i++, pex++)
			;
		flt = 0;
		if (i < 7) {
			u_int32_t stat = HPPA_FPU_OP(*pex);
			if (stat & HPPA_FPU_UNMPL)
				flt = FPE_FLTINV;
			else if (stat & (HPPA_FPU_V << 1))
				flt = FPE_FLTINV;
			else if (stat & (HPPA_FPU_Z << 1))
				flt = FPE_FLTDIV;
			else if (stat & (HPPA_FPU_I << 1))
				flt = FPE_FLTRES;
			else if (stat & (HPPA_FPU_O << 1))
				flt = FPE_FLTOVF;
			else if (stat & (HPPA_FPU_U << 1))
				flt = FPE_FLTUND;
			/* still left: under/over-flow w/ inexact */

			/* cleanup exceptions (XXX deliver all ?) */
			while (i++ < 7)
				*pex++ = 0;
		}
		/* reset the trap flag, as if there was none */
		fpp[0] &= ~(((u_int64_t)HPPA_FPU_T) << 32);

		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, flt, sv);
		}
		break;

	case T_EMULATION:
		panic("trap: emulation trap in the kernel");
		break;

	case T_EMULATION | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGILL, type & ~T_USER, ILL_COPROC, sv);
		break;

	case T_OVERFLOW | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, FPE_INTOVF, sv);
		break;

	case T_CONDITION | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, FPE_INTDIV, sv);
		break;

	case T_PRIV_OP | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVOPC, sv);
		break;

	case T_PRIV_REG | T_USER:
		/*
		 * On PCXS processors, attempting to read control registers
		 * cr26 and cr27 from userland causes a ``privileged register''
		 * trap.  Later processors do not restrict read accesses to
		 * these registers.
		 */
		if (cpu_type == hpcxs &&
		    (opcode & (0xfc1fffe0 | (0x1e << 21))) ==
		     (0x000008a0 | (0x1a << 21))) { /* mfctl %cr{26,27}, %r# */
			register_t cr;

			if (((opcode >> 21) & 0x1f) == 27)
				cr = frame->tf_cr27;	/* cr27 */
			else
				cr = 0;			/* cr26 */
			frame_regmap(frame, opcode & 0x1f) = cr;
			frame->tf_ipsw |= PSL_N;
		} else {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVREG, sv);
		}
		break;

		/* these should never get here */
	case T_HIGHERPL | T_USER:
	case T_LOWERPL | T_USER:
	case T_DATAPID | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGSEGV, access_type, SEGV_ACCERR, sv);
		break;

	/*
	 * On PCXS processors, traps T_DATACC, T_DATAPID and T_DATALIGN
	 * are shared.  We need to sort out the unaligned access situation
	 * first, before handling this trap as T_DATACC.
	 */
	case T_DPROT | T_USER:
		if (cpu_type == hpcxs) {
			if (pcxs_unaligned(opcode, va))
				goto datalign_user;
			else
				goto datacc;
		}

		sv.sival_int = va;
		trapsignal(p, SIGSEGV, access_type, SEGV_ACCERR, sv);
		break;

	case T_ITLBMISSNA:
	case T_ITLBMISSNA | T_USER:
	case T_DTLBMISSNA:
	case T_DTLBMISSNA | T_USER:
		if (space == HPPA_SID_KERNEL)
			map = kernel_map;
		else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		if ((opcode & 0xfc003fc0) == 0x04001340) {
			/* lpa failure case */
			frame_regmap(frame, opcode & 0x1f) = 0;
			frame->tf_ipsw |= PSL_N;
		} else if ((opcode & 0xfc001f80) == 0x04001180) {
			int pl;

			/* dig probe[rw]i? insns */
			if (opcode & 0x2000)
				pl = (opcode >> 16) & 3;
			else
				pl = frame_regmap(frame,
				    (opcode >> 16) & 0x1f) & 3;

			KERNEL_LOCK();

			if ((type & T_USER && space == HPPA_SID_KERNEL) ||
			    (frame->tf_iioq_head & 3) != pl ||
			    (type & T_USER && va >= VM_MAXUSER_ADDRESS) ||
			    uvm_fault(map, trunc_page(va), 0,
			     opcode & 0x40 ? PROT_WRITE : PROT_READ)) {
				frame_regmap(frame, opcode & 0x1f) = 0;
				frame->tf_ipsw |= PSL_N;
			}

			KERNEL_UNLOCK();
		} else if (type & T_USER) {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_ILLTRP, sv);
		} else
			panic("trap: %s @ 0x%lx:0x%lx for 0x%x:0x%lx irr 0x%08x",
			    tts, frame->tf_iisq_head, frame->tf_iioq_head,
			    space, va, opcode);
		break;

	case T_IPROT | T_USER:
	case T_TLB_DIRTY:
	case T_TLB_DIRTY | T_USER:
	case T_DATACC:
	case T_DATACC | T_USER:
datacc:
	case T_ITLBMISS:
	case T_ITLBMISS | T_USER:
	case T_DTLBMISS:
	case T_DTLBMISS | T_USER:
		if (type & T_USER) {
			if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
			    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
			    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
				goto out;
		}

		/*
		 * it could be a kernel map for exec_map faults
		 */
		if (space == HPPA_SID_KERNEL)
			map = kernel_map;
		else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		/*
		 * User faults outside the user address space always fail.
		 * This happens when va >= VM_MAXUSER_ADDRESS, where the
		 * space id is zero and would therefore cause misbehaviour
		 * further down in the code.
		 *
		 * Also check that the faulted space id matches curproc's.
		 */
		if ((type & T_USER && va >= VM_MAXUSER_ADDRESS) ||
		    (type & T_USER && map->pmap->pm_space != space)) {
			sv.sival_int = va;
			trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
			break;
		}

		KERNEL_LOCK();
		ret = uvm_fault(map, trunc_page(va), 0, access_type);
		KERNEL_UNLOCK();

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if uvm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (ret == 0 && space != HPPA_SID_KERNEL)
			uvm_grow(p, va);

		if (ret != 0) {
			if (type & T_USER) {
				int signal, sicode;

				signal = SIGSEGV;
				sicode = SEGV_MAPERR;
				if (ret == EACCES)
					sicode = SEGV_ACCERR;
				if (ret == EIO) {
					signal = SIGBUS;
					sicode = BUS_OBJERR;
				}
				sv.sival_int = va;
				trapsignal(p, signal, access_type, sicode, sv);
			} else {
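				/*
				 * A kernel fault with an onfault handler
				 * registered (copyin/copyout and friends):
				 * redirect the instruction queues to the
				 * fault recovery code instead of panicking.
				 */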
				if (p && p->p_addr->u_pcb.pcb_onfault) {
					frame->tf_iioq_tail = 4 +
					    (frame->tf_iioq_head =
						p->p_addr->u_pcb.pcb_onfault);
#ifdef DDB
					frame->tf_iir = 0;
#endif
				} else {
					panic("trap: "
					    "uvm_fault(%p, %lx, 0, %d): %d",
					    map, va, access_type, ret);
				}
			}
		}
		break;

	case T_DATAPID:
		/* This should never happen, unless within spcopy() */
		if (p && p->p_addr->u_pcb.pcb_onfault) {
			frame->tf_iioq_tail = 4 +
			    (frame->tf_iioq_head =
				p->p_addr->u_pcb.pcb_onfault);
#ifdef DDB
			frame->tf_iir = 0;
#endif
		} else
			goto dead_end;
		break;

	case T_DATALIGN | T_USER:
datalign_user:
		sv.sival_int = va;
		trapsignal(p, SIGBUS, access_type, BUS_ADRALN, sv);
		break;

	case T_INTERRUPT:
	case T_INTERRUPT | T_USER:
		cpu_intr(frame);
		break;

	case T_CONDITION:
		panic("trap: divide by zero in the kernel");
		break;

	case T_ILLEGAL:
	case T_ILLEGAL | T_USER:
		/* see if it's a SPOP1,,0 */
		if ((opcode & 0xfffffe00) == 0x10000200) {
			frame_regmap(frame, opcode & 0x1f) = 0;
			frame->tf_ipsw |= PSL_N;
			break;
		}
		if (type & T_USER) {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_ILLOPC, sv);
			break;
		}
		/* FALLTHROUGH */

	/*
	 * On PCXS processors, traps T_DATACC, T_DATAPID and T_DATALIGN
	 * are shared.  We need to sort out the unaligned access situation
	 * first, before handling this trap as T_DATACC.
	 */
	case T_DPROT:
		if (cpu_type == hpcxs) {
			if (pcxs_unaligned(opcode, va))
				goto dead_end;
			else
				goto datacc;
		}
		/* FALLTHROUGH to unimplemented */

	case T_LOWERPL:
	case T_IPROT:
	case T_OVERFLOW:
	case T_HIGHERPL:
	case T_TAKENBR:
	case T_POWERFAIL:
	case T_LPMC:
	case T_PAGEREF:
		/* FALLTHROUGH to unimplemented */
	default:
#ifdef TRAPDEBUG
		if (db_ktrap(type, va, frame))
			return;
#endif
		panic("trap: unimplemented '%s' (%d)", tts, trapnum);
	}

#ifdef DIAGNOSTIC
	if (curcpu()->ci_cpl != oldcpl)
		printf("WARNING: SPL (%d) NOT LOWERED ON "
		    "TRAP (%d) EXIT\n", curcpu()->ci_cpl, trapnum);
#endif

	if (trapnum != T_INTERRUPT)
		splx(curcpu()->ci_cpl);	/* process softints */

	/*
	 * If we were interrupted from the syscall gateway page, do not
	 * treat this as if we were really running user code, since weird
	 * things start to happen on return to userland; see also the
	 * note in locore.S:TLABEL(all).
	 */
	if ((type & T_USER) && !(frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    (frame->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)) {
		ast(p);
out:
		userret(p);
	}
}

void
child_return(void *arg)
{
	struct proc *p = (struct proc *)arg;
	struct trapframe *tf = p->p_md.md_regs;

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = 0;
	tf->tf_t1 = 0;		/* errno */

	KERNEL_UNLOCK();

	ast(p);

	mi_child_return(p);
}

#ifdef PTRACE

#include <sys/ptrace.h>

int	ss_get_value(struct proc *p, vaddr_t addr, u_int *value);
int	ss_put_value(struct proc *p, vaddr_t addr, u_int value);

int
ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
{
	struct uio uio;
	struct iovec iov;

	iov.iov_base = (caddr_t)value;
	iov.iov_len = sizeof(u_int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = curproc;
	return (process_domem(curproc, p->p_p, &uio, PT_READ_I));
}

int
ss_put_value(struct proc *p, vaddr_t addr, u_int value)
{
	struct uio uio;
	struct iovec iov;

	iov.iov_base = (caddr_t)&value;
	iov.iov_len = sizeof(u_int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_procp = curproc;
	return (process_domem(curproc, p->p_p, &uio, PT_WRITE_I));
}

void
ss_clear_breakpoints(struct proc *p)
{
	/* Restore original instructions. */
	if (p->p_md.md_bpva != 0) {
		ss_put_value(p, p->p_md.md_bpva, p->p_md.md_bpsave[0]);
		ss_put_value(p, p->p_md.md_bpva + 4, p->p_md.md_bpsave[1]);
		p->p_md.md_bpva = 0;
	}
}

int
process_sstep(struct proc *p, int sstep)
{
	int error;

	ss_clear_breakpoints(p);

	if (sstep == 0) {
		p->p_md.md_regs->tf_ipsw &= ~PSL_T;
		return (0);
	}

	/*
	 * Don't touch the syscall gateway page.  Instead, insert a
	 * breakpoint where we're supposed to return.
	 */
	if ((p->p_md.md_regs->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
		p->p_md.md_bpva = p->p_md.md_regs->tf_r31 & ~HPPA_PC_PRIV_MASK;
	else
		p->p_md.md_bpva = p->p_md.md_regs->tf_iioq_tail & ~HPPA_PC_PRIV_MASK;

	/*
	 * Insert two breakpoint instructions; the first one might be
	 * nullified.  We need to save the two original instructions
	 * first.
	 */

	error = ss_get_value(p, p->p_md.md_bpva, &p->p_md.md_bpsave[0]);
	if (error)
		return (error);
	error = ss_get_value(p, p->p_md.md_bpva + 4, &p->p_md.md_bpsave[1]);
	if (error)
		return (error);

	error = ss_put_value(p, p->p_md.md_bpva, SSBREAKPOINT);
	if (error)
		return (error);
	error = ss_put_value(p, p->p_md.md_bpva + 4, SSBREAKPOINT);
	if (error)
		return (error);

	if ((p->p_md.md_regs->tf_iioq_tail & ~PAGE_MASK) != SYSCALLGATE)
		p->p_md.md_regs->tf_ipsw |= PSL_T;
	else
		p->p_md.md_regs->tf_ipsw &= ~PSL_T;

	return (0);
}

#endif	/* PTRACE */

void	syscall(struct trapframe *frame);

/*
 * call actual syscall routine
 */
void
syscall(struct trapframe *frame)
{
	struct proc *p = curproc;
	const struct sysent *callp = sysent;
	int code, argsize, argoff, error;
	register_t args[8], rval[2];
#ifdef DIAGNOSTIC
	int oldcpl = curcpu()->ci_cpl;
#endif

	uvmexp.syscalls++;

	if (!USERMODE(frame->tf_iioq_head))
		panic("syscall");

	p->p_md.md_regs = frame;

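	/*
	 * The first four syscall arguments arrive in registers; any
	 * remaining ones are copied in from the user stack below, where
	 * they are stored in reverse order.
	 */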
	argoff = 4;
	code = frame->tf_t1;
	args[0] = frame->tf_arg0;
	args[1] = frame->tf_arg1;
	args[2] = frame->tf_arg2;
	args[3] = frame->tf_arg3;

	// XXX out of range stays on syscall0, which we assume is enosys
	if (code > 0 && code < SYS_MAXSYSCALL)
		callp += code;

	if ((argsize = callp->sy_argsize)) {
		register_t *s, *e, t;
		int i;

		argsize -= argoff * 4;
		if (argsize > 0) {
			i = argsize / 4;
			if ((error = copyin((void *)(frame->tf_sp +
			    HPPA_FRAME_ARG(4 + i - 1)), args + argoff,
			    argsize)))
				goto bad;
			/* reverse the args[] entries */
			s = args + argoff;
			e = s + i - 1;
			while (s < e) {
				t = *s;
				*s = *e;
				*e = t;
				s++, e--;
			}
		}

		/*
		 * System calls with 64-bit arguments need a word swap
		 * due to the order of the arguments on the stack.
		 */
		i = 0;
		switch (code) {
		case SYS_lseek:
		case SYS_truncate:
		case SYS_ftruncate:	i = 2;	break;
		case SYS_preadv:
		case SYS_pwritev:
		case SYS_pread:
		case SYS_pwrite:	i = 4;	break;
		case SYS_mquery:
		case SYS_mmap:		i = 6;	break;
		}

		if (i) {
			t = args[i];
			args[i] = args[i + 1];
			args[i + 1] = t;
		}
	}

	rval[0] = 0;
	rval[1] = frame->tf_ret1;

	error = mi_syscall(p, code, callp, args, rval);

	switch (error) {
	case 0:
		frame->tf_ret0 = rval[0];
		frame->tf_ret1 = rval[1];
		frame->tf_t1 = 0;
		break;
	case ERESTART:
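		/*
		 * Restart the syscall: back the instruction queues up
		 * over what is presumably the three-instruction
		 * ldil/ble/ldi invocation sequence in userland so it
		 * gets re-executed on return.
		 */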
		frame->tf_iioq_head -= 12;
		frame->tf_iioq_tail -= 12;
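		/* FALLTHROUGH */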
	case EJUSTRETURN:
		break;
	default:
	bad:
		frame->tf_t1 = error;
		frame->tf_ret0 = error;
		frame->tf_ret1 = 0;
		break;
	}

	ast(p);		// XXX why?

	mi_syscall_return(p, code, error, rval);

#ifdef DIAGNOSTIC
	if (curcpu()->ci_cpl != oldcpl) {
		printf("WARNING: SPL (0x%x) NOT LOWERED ON "
		    "syscall(0x%x, 0x%lx, 0x%lx, 0x%lx...) EXIT, PID %d\n",
		    curcpu()->ci_cpl, code, args[0], args[1], args[2],
		    p->p_p->ps_pid);
		curcpu()->ci_cpl = oldcpl;
	}
#endif
	splx(curcpu()->ci_cpl);	/* process softints */
}

/*
 * Decide if opcode `opcode' accessing virtual address `va' caused an
 * unaligned trap. Returns zero if the access is correctly aligned.
 * Used on PCXS processors to sort out exception causes.
 */
int
pcxs_unaligned(u_int opcode, vaddr_t va)
{
	u_int mbz_bits;

	/*
	 * Exit early if the va is obviously aligned enough.
	 */
	if ((va & 0x0f) == 0)
		return 0;

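	/*
	 * mbz_bits collects the low address bits that must be zero for
	 * the access size decoded below; any overlap with the fault
	 * address means the access really was unaligned.
	 */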
	mbz_bits = 0;

	/*
	 * Only load and store instructions can cause unaligned access.
	 * There are three opcode patterns to look for:
	 * - canonical load/store
	 * - load/store short or indexed
	 * - coprocessor load/store
	 */

	if ((opcode & 0xd0000000) == 0x40000000) {
		switch ((opcode >> 26) & 0x03) {
		case 0x00:	/* ldb, stb */
			mbz_bits = 0x00;
			break;
		case 0x01:	/* ldh, sth */
			mbz_bits = 0x01;
			break;
		case 0x02:	/* ldw, stw */
		case 0x03:	/* ldwm, stwm */
			mbz_bits = 0x03;
			break;
		}
	} else

	if ((opcode & 0xfc000000) == 0x0c000000) {
		switch ((opcode >> 6) & 0x0f) {
		case 0x01:	/* ldhx, ldhs */
			mbz_bits = 0x01;
			break;
		case 0x02:	/* ldwx, ldws */
			mbz_bits = 0x03;
			break;
		case 0x07:	/* ldcwx, ldcws */
			mbz_bits = 0x0f;
			break;
		case 0x09:
			if ((opcode & (1 << 12)) != 0)	/* sths */
				mbz_bits = 0x01;
			break;
		case 0x0a:
			if ((opcode & (1 << 12)) != 0)	/* stws */
				mbz_bits = 0x03;
			break;
		}
	} else

	if ((opcode & 0xf4000000) == 0x24000000) {
		if ((opcode & (1 << 27)) != 0) {
			/* cldwx, cstwx, cldws, cstws */
			mbz_bits = 0x03;
		} else {
			/* clddx, cstdx, cldds, cstds */
			mbz_bits = 0x07;
		}
	}

	return (va & mbz_bits);
}