/*	$OpenBSD: trap.c,v 1.162 2023/12/12 15:30:55 deraadt Exp $	*/

/*
 * Copyright (c) 1998-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* #define TRAPDEBUG */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syscall.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/user.h>
#include <sys/syscall_mi.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>

#ifdef DDB
#ifdef TRAPDEBUG
#include <ddb/db_output.h>
#else
#include <machine/db_machdep.h>
#endif
#endif

static __inline int
inst_store(u_int ins)
{
	return (ins & 0xf0000000) == 0x60000000 ||	/* st */
	    (ins & 0xf4000200) == 0x24000200 ||		/* fst/cst */
	    (ins & 0xfc000200) == 0x0c000200 ||		/* stby */
	    (ins & 0xfc0003c0) == 0x0c0001c0;		/* ldcw */
}

int	pcxs_unaligned(u_int opcode, vaddr_t va);
#ifdef PTRACE
void	ss_clear_breakpoints(struct proc *p);
#endif

void	ast(struct proc *);

/* single-step breakpoint */
#define SSBREAKPOINT	(HPPA_BREAK_KERNEL | (HPPA_BREAK_SS << 13))
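/*
 * Since the break instruction uses major opcode zero, SSBREAKPOINT above is
 * also the complete instruction word that process_sstep() plants and that
 * trap() compares the trapping opcode against to tell a single-step break
 * from an ordinary user breakpoint.
 */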

const char *trap_type[] = {
	"invalid",
	"HPMC",
	"power failure",
	"recovery counter",
	"external interrupt",
	"LPMC",
	"ITLB miss fault",
	"instruction protection",
	"Illegal instruction",
	"break instruction",
	"privileged operation",
	"privileged register",
	"overflow",
	"conditional",
	"assist exception",
	"DTLB miss",
	"ITLB non-access miss",
	"DTLB non-access miss",
	"data protection/rights/alignment",
	"data break",
	"TLB dirty",
	"page reference",
	"assist emulation",
	"higher-priv transfer",
	"lower-priv transfer",
	"taken branch",
	"data access rights",
	"data protection",
	"unaligned data ref",
};
int trap_types = sizeof(trap_type)/sizeof(trap_type[0]);

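/*
 * hppa_regmap[] maps an architectural general register number to the word
 * offset of its saved copy inside struct trapframe; frame_regmap() uses it
 * so the emulation code below can read or write an arbitrary general
 * register of the interrupted context by the register number found in the
 * trapping opcode.
 */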
#define	frame_regmap(tf,r)	(((u_int *)(tf))[hppa_regmap[(r)]])
u_char hppa_regmap[32] = {
	offsetof(struct trapframe, tf_pad[0]) / 4,	/* r0 XXX */
	offsetof(struct trapframe, tf_r1) / 4,
	offsetof(struct trapframe, tf_rp) / 4,
	offsetof(struct trapframe, tf_r3) / 4,
	offsetof(struct trapframe, tf_r4) / 4,
	offsetof(struct trapframe, tf_r5) / 4,
	offsetof(struct trapframe, tf_r6) / 4,
	offsetof(struct trapframe, tf_r7) / 4,
	offsetof(struct trapframe, tf_r8) / 4,
	offsetof(struct trapframe, tf_r9) / 4,
	offsetof(struct trapframe, tf_r10) / 4,
	offsetof(struct trapframe, tf_r11) / 4,
	offsetof(struct trapframe, tf_r12) / 4,
	offsetof(struct trapframe, tf_r13) / 4,
	offsetof(struct trapframe, tf_r14) / 4,
	offsetof(struct trapframe, tf_r15) / 4,
	offsetof(struct trapframe, tf_r16) / 4,
	offsetof(struct trapframe, tf_r17) / 4,
	offsetof(struct trapframe, tf_r18) / 4,
	offsetof(struct trapframe, tf_t4) / 4,
	offsetof(struct trapframe, tf_t3) / 4,
	offsetof(struct trapframe, tf_t2) / 4,
	offsetof(struct trapframe, tf_t1) / 4,
	offsetof(struct trapframe, tf_arg3) / 4,
	offsetof(struct trapframe, tf_arg2) / 4,
	offsetof(struct trapframe, tf_arg1) / 4,
	offsetof(struct trapframe, tf_arg0) / 4,
	offsetof(struct trapframe, tf_dp) / 4,
	offsetof(struct trapframe, tf_ret0) / 4,
	offsetof(struct trapframe, tf_ret1) / 4,
	offsetof(struct trapframe, tf_sp) / 4,
	offsetof(struct trapframe, tf_r31) / 4,
};

void
ast(struct proc *p)
{
	if (p->p_md.md_astpending) {
		p->p_md.md_astpending = 0;
		uvmexp.softs++;
		mi_ast(p, curcpu()->ci_want_resched);
	}
}

void
trap(int type, struct trapframe *frame)
{
	struct proc *p = curproc;
	vaddr_t va;
	struct vm_map *map;
	struct vmspace *vm;
	register vm_prot_t access_type;
	register pa_space_t space;
	union sigval sv;
	u_int opcode;
	int ret, trapnum;
	const char *tts;
#ifdef DIAGNOSTIC
	int oldcpl = curcpu()->ci_cpl;
#endif

	trapnum = type & ~T_USER;
	opcode = frame->tf_iir;
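	/*
	 * Work out the faulting address, space and access type: for
	 * instruction-side and transfer traps the fault is at the head of
	 * the instruction queue, while data-side traps report the offending
	 * address in ior/isr and the trapping opcode tells a load from a
	 * store.
	 */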
	if (trapnum <= T_EXCEPTION || trapnum == T_HIGHERPL ||
	    trapnum == T_LOWERPL || trapnum == T_TAKENBR ||
	    trapnum == T_IDEBUG || trapnum == T_PERFMON) {
		va = frame->tf_iioq_head;
		space = frame->tf_iisq_head;
		access_type = PROT_EXEC;
	} else {
		va = frame->tf_ior;
		space = frame->tf_isr;
		if (va == frame->tf_iioq_head)
			access_type = PROT_EXEC;
		else if (inst_store(opcode))
			access_type = PROT_WRITE;
		else
			access_type = PROT_READ;
	}

	if (frame->tf_flags & TFF_LAST)
		p->p_md.md_regs = frame;

	if (trapnum >= trap_types)
		tts = "reserved";
	else
		tts = trap_type[trapnum];

#ifdef TRAPDEBUG
	if (trapnum != T_INTERRUPT && trapnum != T_IBREAK)
		db_printf("trap: %x, %s for %x:%x at %x:%x, fl=%x, fp=%p\n",
		    type, tts, space, va, frame->tf_iisq_head,
		    frame->tf_iioq_head, frame->tf_flags, frame);
	else if (trapnum == T_IBREAK)
		db_printf("trap: break instruction %x:%x at %x:%x, fp=%p\n",
		    break5(opcode), break13(opcode),
		    frame->tf_iisq_head, frame->tf_iioq_head, frame);

	{
		extern int etext;
		if (frame < (struct trapframe *)&etext) {
			printf("trap: bogus frame ptr %p\n", frame);
			goto dead_end;
		}
	}
#endif
	if (trapnum != T_INTERRUPT) {
		uvmexp.traps++;
		mtctl(frame->tf_eiem, CR_EIEM);
	}

	if (type & T_USER)
		refreshcreds(p);

	switch (type) {
	case T_NONEXIST:
	case T_NONEXIST | T_USER:
		/* we've got screwed up by the central scrutinizer */
		printf("trap: elvis has just left the building!\n");
		goto dead_end;

	case T_RECOVERY:
	case T_RECOVERY | T_USER:
		/* XXX will implement later */
		printf("trap: handicapped");
		goto dead_end;

#ifdef DIAGNOSTIC
	case T_EXCEPTION:
		panic("FPU/SFU emulation botch");

		/* these just can't happen ever */
	case T_PRIV_OP:
	case T_PRIV_REG:
		/* these just can't make it to the trap() ever */
	case T_HPMC:
	case T_HPMC | T_USER:
#endif
	case T_IBREAK:
	case T_DATALIGN:
	case T_DBREAK:
	dead_end:
#ifdef DDB
		if (db_ktrap(type, va, frame)) {
			if (type == T_IBREAK) {
				/* skip break instruction */
				frame->tf_iioq_head = frame->tf_iioq_tail;
				frame->tf_iioq_tail += 4;
			}
			return;
		}
#else
		if (type == T_DATALIGN || type == T_DPROT)
			panic("trap: %s at 0x%lx", tts, va);
		else
			panic("trap: no debugger for \"%s\" (%d)", tts, type);
#endif
		break;

	case T_IBREAK | T_USER:
	case T_DBREAK | T_USER: {
		int code = TRAP_BRKPT;

#ifdef PTRACE
		KERNEL_LOCK();
		ss_clear_breakpoints(p);
		if (opcode == SSBREAKPOINT)
			code = TRAP_TRACE;
		KERNEL_UNLOCK();
#endif
		/* pass to user debugger */
		sv.sival_int = va;
		trapsignal(p, SIGTRAP, type & ~T_USER, code, sv);
		}
		break;

#ifdef PTRACE
	case T_TAKENBR | T_USER:
		KERNEL_LOCK();
		ss_clear_breakpoints(p);
		KERNEL_UNLOCK();
		/* pass to user debugger */
		sv.sival_int = va;
		trapsignal(p, SIGTRAP, type & ~T_USER, TRAP_TRACE, sv);
		break;
#endif

	case T_EXCEPTION | T_USER: {
		struct hppa_fpstate *hfp;
		u_int64_t *fpp;
		u_int32_t *pex;
		int i, flt;

		hfp = (struct hppa_fpstate *)frame->tf_cr30;
		fpp = (u_int64_t *)&hfp->hfp_regs;

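		/*
		 * Word 0 of the FP register save area is the floating point
		 * status register; the following seven words are the
		 * exception registers.  Scan them for the first pending
		 * exception and map it to an si_code for SIGFPE.
		 */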
		pex = (u_int32_t *)&fpp[0];
		for (i = 0, pex++; i < 7 && !*pex; i++, pex++);
		flt = 0;
		if (i < 7) {
			u_int32_t stat = HPPA_FPU_OP(*pex);
			if (stat & HPPA_FPU_UNMPL)
				flt = FPE_FLTINV;
			else if (stat & (HPPA_FPU_V << 1))
				flt = FPE_FLTINV;
			else if (stat & (HPPA_FPU_Z << 1))
				flt = FPE_FLTDIV;
			else if (stat & (HPPA_FPU_I << 1))
				flt = FPE_FLTRES;
			else if (stat & (HPPA_FPU_O << 1))
				flt = FPE_FLTOVF;
			else if (stat & (HPPA_FPU_U << 1))
				flt = FPE_FLTUND;
			/* still left: under/over-flow w/ inexact */

			/* cleanup exceptions (XXX deliver all ?) */
			while (i++ < 7)
				*pex++ = 0;
		}
		/* reset the trap flag, as if there was none */
		fpp[0] &= ~(((u_int64_t)HPPA_FPU_T) << 32);

		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, flt, sv);
		}
		break;

	case T_EMULATION:
		panic("trap: emulation trap in the kernel");
		break;

	case T_EMULATION | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGILL, type & ~T_USER, ILL_COPROC, sv);
		break;

	case T_OVERFLOW | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, FPE_INTOVF, sv);
		break;

	case T_CONDITION | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, FPE_INTDIV, sv);
		break;

	case T_PRIV_OP | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVOPC, sv);
		break;

	case T_PRIV_REG | T_USER:
		/*
		 * On PCXS processors, attempting to read control registers
		 * cr26 and cr27 from userland causes a ``privileged register''
		 * trap.  Later processors do not restrict read accesses to
		 * these registers.
		 */
		if (cpu_type == hpcxs &&
		    (opcode & (0xfc1fffe0 | (0x1e << 21))) ==
		     (0x000008a0 | (0x1a << 21))) { /* mfctl %cr{26,27}, %r# */
			register_t cr;

			if (((opcode >> 21) & 0x1f) == 27)
				cr = frame->tf_cr27;	/* cr27 */
			else
				cr = 0;			/* cr26 */
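			/*
			 * Complete the mfctl by hand and set PSL_N so the
			 * trapping instruction is nullified on return instead
			 * of being re-executed.
			 */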
			frame_regmap(frame, opcode & 0x1f) = cr;
			frame->tf_ipsw |= PSL_N;
		} else {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVREG, sv);
		}
		break;

		/* these should never get here */
	case T_HIGHERPL | T_USER:
	case T_LOWERPL | T_USER:
	case T_DATAPID | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGSEGV, access_type, SEGV_ACCERR, sv);
		break;

	/*
	 * On PCXS processors, traps T_DATACC, T_DATAPID and T_DATALIGN
	 * are shared.  We need to sort out the unaligned access situation
	 * first, before handling this trap as T_DATACC.
	 */
	case T_DPROT | T_USER:
		if (cpu_type == hpcxs) {
			if (pcxs_unaligned(opcode, va))
				goto datalign_user;
			else
				goto datacc;
		}

		sv.sival_int = va;
		trapsignal(p, SIGSEGV, access_type, SEGV_ACCERR, sv);
		break;

	case T_ITLBMISSNA:
	case T_ITLBMISSNA | T_USER:
	case T_DTLBMISSNA:
	case T_DTLBMISSNA | T_USER:
		if (space == HPPA_SID_KERNEL)
			map = kernel_map;
		else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		if ((opcode & 0xfc003fc0) == 0x04001340) {
			/* lpa failure case */
			frame_regmap(frame, opcode & 0x1f) = 0;
			frame->tf_ipsw |= PSL_N;
		} else if ((opcode & 0xfc001f80) == 0x04001180) {
			int pl;

			/* dig probe[rw]i? insns */
			if (opcode & 0x2000)
				pl = (opcode >> 16) & 3;
			else
				pl = frame_regmap(frame,
				    (opcode >> 16) & 0x1f) & 3;

			KERNEL_LOCK();

			if ((type & T_USER && space == HPPA_SID_KERNEL) ||
			    (frame->tf_iioq_head & 3) != pl ||
			    (type & T_USER && va >= VM_MAXUSER_ADDRESS) ||
			    uvm_fault(map, trunc_page(va), 0,
			     opcode & 0x40 ? PROT_WRITE : PROT_READ)) {
				frame_regmap(frame, opcode & 0x1f) = 0;
				frame->tf_ipsw |= PSL_N;
			}

			KERNEL_UNLOCK();
		} else if (type & T_USER) {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_ILLTRP, sv);
		} else
			panic("trap: %s @ 0x%lx:0x%lx for 0x%x:0x%lx irr 0x%08x",
			    tts, frame->tf_iisq_head, frame->tf_iioq_head,
			    space, va, opcode);
		break;

	case T_IPROT | T_USER:
	case T_TLB_DIRTY:
	case T_TLB_DIRTY | T_USER:
	case T_DATACC:
	case T_DATACC | T_USER:
datacc:
	case T_ITLBMISS:
	case T_ITLBMISS | T_USER:
	case T_DTLBMISS:
	case T_DTLBMISS | T_USER:
		if (type & T_USER) {
			if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
			    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
			    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
				goto out;
		}

		/*
		 * it could be a kernel map for exec_map faults
		 */
		if (space == HPPA_SID_KERNEL)
			map = kernel_map;
		else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		/*
		 * User faults outside the user address space always fail.
		 * This happens when va >= VM_MAXUSER_ADDRESS, where the
		 * space id will be zero and would otherwise cause
		 * misbehaviour further down in the fault path.
		 *
		 * Also check that the faulting space id matches curproc.
		 */
		if ((type & T_USER && va >= VM_MAXUSER_ADDRESS) ||
		    (type & T_USER && map->pmap->pm_space != space)) {
			sv.sival_int = va;
			trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
			break;
		}

		KERNEL_LOCK();
		ret = uvm_fault(map, trunc_page(va), 0, access_type);
		KERNEL_UNLOCK();

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if uvm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (ret == 0 && space != HPPA_SID_KERNEL)
			uvm_grow(p, va);

		if (ret != 0) {
			if (type & T_USER) {
				int signal, sicode;

				signal = SIGSEGV;
				sicode = SEGV_MAPERR;
				if (ret == EACCES)
					sicode = SEGV_ACCERR;
				if (ret == EIO) {
					signal = SIGBUS;
					sicode = BUS_OBJERR;
				}
				sv.sival_int = va;
				trapsignal(p, signal, access_type, sicode, sv);
			} else {
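				/*
				 * A kernel fault with an onfault handler set
				 * (copyin/copyout and friends) resumes at the
				 * handler instead of panicking: point both
				 * instruction queue words at it.
				 */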
				if (p && p->p_addr->u_pcb.pcb_onfault) {
					frame->tf_iioq_tail = 4 +
					    (frame->tf_iioq_head =
						p->p_addr->u_pcb.pcb_onfault);
#ifdef DDB
					frame->tf_iir = 0;
#endif
				} else {
					panic("trap: "
					    "uvm_fault(%p, %lx, 0, %d): %d",
					    map, va, access_type, ret);
				}
			}
		}
		break;

	case T_DATAPID:
		/* This should never happen, unless within spcopy() */
		if (p && p->p_addr->u_pcb.pcb_onfault) {
			frame->tf_iioq_tail = 4 +
			    (frame->tf_iioq_head =
				p->p_addr->u_pcb.pcb_onfault);
#ifdef DDB
			frame->tf_iir = 0;
#endif
		} else
			goto dead_end;
		break;

	case T_DATALIGN | T_USER:
datalign_user:
		sv.sival_int = va;
		trapsignal(p, SIGBUS, access_type, BUS_ADRALN, sv);
		break;

	case T_INTERRUPT:
	case T_INTERRUPT | T_USER:
		cpu_intr(frame);
		break;

	case T_CONDITION:
		panic("trap: divide by zero in the kernel");
		break;

	case T_ILLEGAL:
	case T_ILLEGAL | T_USER:
		/* see if it's a SPOP1,,0 */
		if ((opcode & 0xfffffe00) == 0x10000200) {
			frame_regmap(frame, opcode & 0x1f) = 0;
			frame->tf_ipsw |= PSL_N;
			break;
		}
		if (type & T_USER) {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_ILLOPC, sv);
			break;
		}
		/* FALLTHROUGH */

	/*
	 * On PCXS processors, traps T_DATACC, T_DATAPID and T_DATALIGN
	 * are shared.  We need to sort out the unaligned access situation
	 * first, before handling this trap as T_DATACC.
	 */
	case T_DPROT:
		if (cpu_type == hpcxs) {
			if (pcxs_unaligned(opcode, va))
				goto dead_end;
			else
				goto datacc;
		}
		/* FALLTHROUGH to unimplemented */

	case T_LOWERPL:
	case T_IPROT:
	case T_OVERFLOW:
	case T_HIGHERPL:
	case T_TAKENBR:
	case T_POWERFAIL:
	case T_LPMC:
	case T_PAGEREF:
		/* FALLTHROUGH to unimplemented */
	default:
#ifdef TRAPDEBUG
		if (db_ktrap(type, va, frame))
			return;
#endif
		panic("trap: unimplemented '%s' (%d)", tts, trapnum);
	}

#ifdef DIAGNOSTIC
	if (curcpu()->ci_cpl != oldcpl)
		printf("WARNING: SPL (%d) NOT LOWERED ON "
		    "TRAP (%d) EXIT\n", curcpu()->ci_cpl, trapnum);
#endif

	if (trapnum != T_INTERRUPT)
		splx(curcpu()->ci_cpl);	/* process softints */

	/*
	 * If we were interrupted from the syscall gateway page, treat it
	 * as if we were not running user code, since weird things start
	 * to happen on the return to userland otherwise; see also the
	 * note in locore.S:TLABEL(all).
	 */
	if ((type & T_USER) && !(frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    (frame->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)) {
		ast(p);
out:
		userret(p);
	}
}

void
child_return(void *arg)
{
	struct proc *p = (struct proc *)arg;
	struct trapframe *tf = p->p_md.md_regs;

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = 0;
	tf->tf_t1 = 0;		/* errno */

	KERNEL_UNLOCK();

	ast(p);

	mi_child_return(p);
}

#ifdef PTRACE

#include <sys/ptrace.h>

int	ss_get_value(struct proc *p, vaddr_t addr, u_int *value);
int	ss_put_value(struct proc *p, vaddr_t addr, u_int value);

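/*
 * Read or patch one instruction word in the traced process' text, going
 * through process_domem() the same way ptrace(PT_READ_I)/(PT_WRITE_I) would.
 */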
int
ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
{
	struct uio uio;
	struct iovec iov;

	iov.iov_base = (caddr_t)value;
	iov.iov_len = sizeof(u_int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = curproc;
	return (process_domem(curproc, p->p_p, &uio, PT_READ_I));
}

int
ss_put_value(struct proc *p, vaddr_t addr, u_int value)
{
	struct uio uio;
	struct iovec iov;

	iov.iov_base = (caddr_t)&value;
	iov.iov_len = sizeof(u_int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_procp = curproc;
	return (process_domem(curproc, p->p_p, &uio, PT_WRITE_I));
}

void
ss_clear_breakpoints(struct proc *p)
{
	/* Restore original instructions. */
	if (p->p_md.md_bpva != 0) {
		ss_put_value(p, p->p_md.md_bpva, p->p_md.md_bpsave[0]);
		ss_put_value(p, p->p_md.md_bpva + 4, p->p_md.md_bpsave[1]);
		p->p_md.md_bpva = 0;
	}
}

int
process_sstep(struct proc *p, int sstep)
{
	int error;

	ss_clear_breakpoints(p);

	if (sstep == 0) {
		p->p_md.md_regs->tf_ipsw &= ~PSL_T;
		return (0);
	}

	/*
	 * Don't touch the syscall gateway page.  Instead, insert a
	 * breakpoint where we're supposed to return.
	 */
	if ((p->p_md.md_regs->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
		p->p_md.md_bpva = p->p_md.md_regs->tf_r31 & ~HPPA_PC_PRIV_MASK;
	else
		p->p_md.md_bpva =
		    p->p_md.md_regs->tf_iioq_tail & ~HPPA_PC_PRIV_MASK;

	/*
	 * Insert two breakpoint instructions; the first one might be
	 * nullified.  Of course we need to save the two original
	 * instructions first.
	 */

	error = ss_get_value(p, p->p_md.md_bpva, &p->p_md.md_bpsave[0]);
	if (error)
		return (error);
	error = ss_get_value(p, p->p_md.md_bpva + 4, &p->p_md.md_bpsave[1]);
	if (error)
		return (error);

	error = ss_put_value(p, p->p_md.md_bpva, SSBREAKPOINT);
	if (error)
		return (error);
	error = ss_put_value(p, p->p_md.md_bpva + 4, SSBREAKPOINT);
	if (error)
		return (error);

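	/*
	 * Enable taken-branch traps (PSL_T) for normal single stepping;
	 * when stepping over the syscall gateway page we rely on the
	 * breakpoint planted at the return address instead.
	 */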
	if ((p->p_md.md_regs->tf_iioq_tail & ~PAGE_MASK) != SYSCALLGATE)
		p->p_md.md_regs->tf_ipsw |= PSL_T;
	else
		p->p_md.md_regs->tf_ipsw &= ~PSL_T;

	return (0);
}

#endif	/* PTRACE */

void	syscall(struct trapframe *frame);

/*
 * call actual syscall routine
 */
void
syscall(struct trapframe *frame)
{
	struct proc *p = curproc;
	const struct sysent *callp = sysent;
	int retq, code, argsize, argoff, error;
	register_t args[8], rval[2];
#ifdef DIAGNOSTIC
	int oldcpl = curcpu()->ci_cpl;
#endif

	uvmexp.syscalls++;

	if (!USERMODE(frame->tf_iioq_head))
		panic("syscall");

	p->p_md.md_regs = frame;

	argoff = 4; retq = 0;
	code = frame->tf_t1;		/* syscall number arrives in t1 */
	args[0] = frame->tf_arg0;
	args[1] = frame->tf_arg1;
	args[2] = frame->tf_arg2;
	args[3] = frame->tf_arg3;

	// XXX out of range stays on syscall0, which we assume is enosys
	if (code > 0 && code < SYS_MAXSYSCALL)
		callp += code;

	if ((argsize = callp->sy_argsize)) {
		register_t *s, *e, t;
		int i;

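		/*
		 * The first four arguments came in arg0-arg3; any further
		 * ones sit in the caller's stack frame at HPPA_FRAME_ARG
		 * offsets, so they are copied in as one block and then
		 * reversed into positional order.
		 */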
		argsize -= argoff * 4;
		if (argsize > 0) {
			i = argsize / 4;
			if ((error = copyin((void *)(frame->tf_sp +
			    HPPA_FRAME_ARG(4 + i - 1)), args + argoff,
			    argsize)))
				goto bad;
			/* reverse the args[] entries */
			s = args + argoff;
			e = s + i - 1;
			while (s < e) {
				t = *s;
				*s = *e;
				*e = t;
				s++, e--;
			}
		}

		/*
		 * System calls with 64-bit arguments need a word swap
		 * due to the order of the arguments on the stack.
		 */
		i = 0;
		switch (code) {
		case SYS_lseek:		retq = 0;
		case SYS_truncate:
		case SYS_ftruncate:	i = 2;	break;
		case SYS_preadv:
		case SYS_pwritev:
		case SYS_pread:
		case SYS_pwrite:	i = 4;	break;
		case SYS_mquery:
		case SYS_mmap:		i = 6;	break;
		}

		if (i) {
			t = args[i];
			args[i] = args[i + 1];
			args[i + 1] = t;
		}
	}

	rval[0] = 0;
	rval[1] = frame->tf_ret1;

	error = mi_syscall(p, code, callp, args, rval);

	switch (error) {
	case 0:
		frame->tf_ret0 = rval[0];
		frame->tf_ret1 = rval[!retq];
		frame->tf_t1 = 0;
		break;
	case ERESTART:
		frame->tf_iioq_head -= 12;
		frame->tf_iioq_tail -= 12;
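		/*
		 * Backing both instruction queue words up by 12 bytes (three
		 * instructions) re-executes the syscall entry sequence when
		 * the process resumes.
		 */
		/* FALLTHROUGH */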
	case EJUSTRETURN:
		break;
	default:
	bad:
		frame->tf_t1 = error;
		frame->tf_ret0 = error;
		frame->tf_ret1 = 0;
		break;
	}

	ast(p);		// XXX why?

	mi_syscall_return(p, code, error, rval);

#ifdef DIAGNOSTIC
	if (curcpu()->ci_cpl != oldcpl) {
		printf("WARNING: SPL (0x%x) NOT LOWERED ON "
		    "syscall(0x%x, 0x%lx, 0x%lx, 0x%lx...) EXIT, PID %d\n",
		    curcpu()->ci_cpl, code, args[0], args[1], args[2],
		    p->p_p->ps_pid);
		curcpu()->ci_cpl = oldcpl;
	}
#endif
	splx(curcpu()->ci_cpl);	/* process softints */
}

/*
 * Decide if opcode `opcode' accessing virtual address `va' caused an
 * unaligned trap. Returns zero if the access is correctly aligned.
 * Used on PCXS processors to sort out exception causes.
 */
int
pcxs_unaligned(u_int opcode, vaddr_t va)
{
	u_int mbz_bits;

	/*
	 * Exit early if the va is obviously aligned enough.
	 */
	if ((va & 0x0f) == 0)
		return 0;

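	/*
	 * mbz_bits collects the low address bits that must be zero for the
	 * access size implied by the opcode; a nonzero result below means
	 * the access really was unaligned.
	 */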
	mbz_bits = 0;

	/*
	 * Only load and store instructions can cause unaligned access.
	 * There are three opcode patterns to look for:
	 * - canonical load/store
	 * - load/store short or indexed
	 * - coprocessor load/store
	 */

	if ((opcode & 0xd0000000) == 0x40000000) {
		switch ((opcode >> 26) & 0x03) {
		case 0x00:	/* ldb, stb */
			mbz_bits = 0x00;
			break;
		case 0x01:	/* ldh, sth */
			mbz_bits = 0x01;
			break;
		case 0x02:	/* ldw, stw */
		case 0x03:	/* ldwm, stwm */
			mbz_bits = 0x03;
			break;
		}
	} else

	if ((opcode & 0xfc000000) == 0x0c000000) {
		switch ((opcode >> 6) & 0x0f) {
		case 0x01:	/* ldhx, ldhs */
			mbz_bits = 0x01;
			break;
		case 0x02:	/* ldwx, ldws */
			mbz_bits = 0x03;
			break;
		case 0x07:	/* ldcwx, ldcws */
			mbz_bits = 0x0f;
			break;
		case 0x09:
			if ((opcode & (1 << 12)) != 0)	/* sths */
				mbz_bits = 0x01;
			break;
		case 0x0a:
			if ((opcode & (1 << 12)) != 0)	/* stws */
				mbz_bits = 0x03;
			break;
		}
	} else

	if ((opcode & 0xf4000000) == 0x24000000) {
		if ((opcode & (1 << 27)) != 0) {
			/* cldwx, cstwx, cldws, cstws */
			mbz_bits = 0x03;
		} else {
			/* clddx, cstdx, cldds, cstds */
			mbz_bits = 0x07;
		}
	}

	return (va & mbz_bits);
}
