/*	$OpenBSD: trap.c,v 1.150 2020/10/08 19:41:04 deraadt Exp $	*/

/*
 * Copyright (c) 1998-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* #define TRAPDEBUG */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syscall.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/user.h>
#include <sys/syscall_mi.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>

#ifdef DDB
#ifdef TRAPDEBUG
#include <ddb/db_output.h>
#else
#include <machine/db_machdep.h>
#endif
#endif

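/*
 * Return non-zero if instruction `ins' writes memory: stores,
 * FP/coprocessor stores, store bytes, and ldcw (which zeroes the
 * word it loads).
 */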
static __inline int inst_store(u_int ins) {
	return (ins & 0xf0000000) == 0x60000000 ||	/* st */
	       (ins & 0xf4000200) == 0x24000200 ||	/* fst/cst */
	       (ins & 0xfc000200) == 0x0c000200 ||	/* stby */
	       (ins & 0xfc0003c0) == 0x0c0001c0;	/* ldcw */
}

int	pcxs_unaligned(u_int opcode, vaddr_t va);
#ifdef PTRACE
void	ss_clear_breakpoints(struct proc *p);
#endif

void	ast(struct proc *);

/* single-step breakpoint */
#define SSBREAKPOINT	(HPPA_BREAK_KERNEL | (HPPA_BREAK_SS << 13))

const char *trap_type[] = {
	"invalid",
	"HPMC",
	"power failure",
	"recovery counter",
	"external interrupt",
	"LPMC",
	"ITLB miss fault",
	"instruction protection",
	"Illegal instruction",
	"break instruction",
	"privileged operation",
	"privileged register",
	"overflow",
	"conditional",
	"assist exception",
	"DTLB miss",
	"ITLB non-access miss",
	"DTLB non-access miss",
	"data protection/rights/alignment",
	"data break",
	"TLB dirty",
	"page reference",
	"assist emulation",
	"higher-priv transfer",
	"lower-priv transfer",
	"taken branch",
	"data access rights",
	"data protection",
	"unaligned data ref",
};
int trap_types = sizeof(trap_type)/sizeof(trap_type[0]);

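/*
 * Map a general register number to its word offset within struct
 * trapframe, so trap handlers can inspect or patch user registers
 * by number.  r0 has no slot and is redirected to a pad word.
 */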
#define	frame_regmap(tf,r)	(((u_int *)(tf))[hppa_regmap[(r)]])
u_char hppa_regmap[32] = {
	offsetof(struct trapframe, tf_pad[0]) / 4,	/* r0 XXX */
	offsetof(struct trapframe, tf_r1) / 4,
	offsetof(struct trapframe, tf_rp) / 4,
	offsetof(struct trapframe, tf_r3) / 4,
	offsetof(struct trapframe, tf_r4) / 4,
	offsetof(struct trapframe, tf_r5) / 4,
	offsetof(struct trapframe, tf_r6) / 4,
	offsetof(struct trapframe, tf_r7) / 4,
	offsetof(struct trapframe, tf_r8) / 4,
	offsetof(struct trapframe, tf_r9) / 4,
	offsetof(struct trapframe, tf_r10) / 4,
	offsetof(struct trapframe, tf_r11) / 4,
	offsetof(struct trapframe, tf_r12) / 4,
	offsetof(struct trapframe, tf_r13) / 4,
	offsetof(struct trapframe, tf_r14) / 4,
	offsetof(struct trapframe, tf_r15) / 4,
	offsetof(struct trapframe, tf_r16) / 4,
	offsetof(struct trapframe, tf_r17) / 4,
	offsetof(struct trapframe, tf_r18) / 4,
	offsetof(struct trapframe, tf_t4) / 4,
	offsetof(struct trapframe, tf_t3) / 4,
	offsetof(struct trapframe, tf_t2) / 4,
	offsetof(struct trapframe, tf_t1) / 4,
	offsetof(struct trapframe, tf_arg3) / 4,
	offsetof(struct trapframe, tf_arg2) / 4,
	offsetof(struct trapframe, tf_arg1) / 4,
	offsetof(struct trapframe, tf_arg0) / 4,
	offsetof(struct trapframe, tf_dp) / 4,
	offsetof(struct trapframe, tf_ret0) / 4,
	offsetof(struct trapframe, tf_ret1) / 4,
	offsetof(struct trapframe, tf_sp) / 4,
	offsetof(struct trapframe, tf_r31) / 4,
};

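/*
 * Handle a pending asynchronous system trap (AST) for process `p'.
 */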
void
ast(struct proc *p)
{
	if (p->p_md.md_astpending) {
		p->p_md.md_astpending = 0;
		uvmexp.softs++;
		mi_ast(p, curcpu()->ci_want_resched);
	}
}

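/*
 * Handle a trap or fault.  Faults that can be resolved are passed to
 * uvm_fault(); most others result in a signal to the current process,
 * or a panic if they happen in the kernel.
 */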
void
trap(int type, struct trapframe *frame)
{
	struct proc *p = curproc;
	vaddr_t va;
	struct vm_map *map;
	struct vmspace *vm;
	register vm_prot_t access_type;
	register pa_space_t space;
	union sigval sv;
	u_int opcode;
	int ret, trapnum;
	const char *tts;
#ifdef DIAGNOSTIC
	int oldcpl = curcpu()->ci_cpl;
#endif

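	/*
	 * Instruction-side traps take their fault address and space from
	 * the instruction address queues; data-side traps take them from
	 * the interruption offset/space registers.
	 */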
	trapnum = type & ~T_USER;
	opcode = frame->tf_iir;
	if (trapnum <= T_EXCEPTION || trapnum == T_HIGHERPL ||
	    trapnum == T_LOWERPL || trapnum == T_TAKENBR ||
	    trapnum == T_IDEBUG || trapnum == T_PERFMON) {
		va = frame->tf_iioq_head;
		space = frame->tf_iisq_head;
		access_type = PROT_EXEC;
	} else {
		va = frame->tf_ior;
		space = frame->tf_isr;
		if (va == frame->tf_iioq_head)
			access_type = PROT_EXEC;
		else if (inst_store(opcode))
			access_type = PROT_WRITE;
		else
			access_type = PROT_READ;
	}

	if (frame->tf_flags & TFF_LAST)
		p->p_md.md_regs = frame;

	if (trapnum > trap_types)
		tts = "reserved";
	else
		tts = trap_type[trapnum];

#ifdef TRAPDEBUG
	if (trapnum != T_INTERRUPT && trapnum != T_IBREAK)
		db_printf("trap: %x, %s for %x:%x at %x:%x, fl=%x, fp=%p\n",
		    type, tts, space, va, frame->tf_iisq_head,
		    frame->tf_iioq_head, frame->tf_flags, frame);
	else if (trapnum == T_IBREAK)
		db_printf("trap: break instruction %x:%x at %x:%x, fp=%p\n",
		    break5(opcode), break13(opcode),
		    frame->tf_iisq_head, frame->tf_iioq_head, frame);

	{
		extern int etext;
		if (frame < (struct trapframe *)&etext) {
			printf("trap: bogus frame ptr %p\n", frame);
			goto dead_end;
		}
	}
#endif
	if (trapnum != T_INTERRUPT) {
		uvmexp.traps++;
		mtctl(frame->tf_eiem, CR_EIEM);
	}

	if (type & T_USER)
		refreshcreds(p);

	switch (type) {
	case T_NONEXIST:
	case T_NONEXIST | T_USER:
		/* we've got screwed up by the central scrutinizer */
		printf("trap: elvis has just left the building!\n");
		goto dead_end;

	case T_RECOVERY:
	case T_RECOVERY | T_USER:
		/* XXX will implement later */
		printf("trap: handicapped");
		goto dead_end;

#ifdef DIAGNOSTIC
	case T_EXCEPTION:
		panic("FPU/SFU emulation botch");

		/* these just can't happen ever */
	case T_PRIV_OP:
	case T_PRIV_REG:
		/* these just can't make it to the trap() ever */
	case T_HPMC:
	case T_HPMC | T_USER:
#endif
	case T_IBREAK:
	case T_DATALIGN:
	case T_DBREAK:
	dead_end:
#ifdef DDB
		if (db_ktrap(type, va, frame)) {
			if (type == T_IBREAK) {
				/* skip break instruction */
				frame->tf_iioq_head = frame->tf_iioq_tail;
				frame->tf_iioq_tail += 4;
			}
			return;
		}
#else
		if (type == T_DATALIGN || type == T_DPROT)
			panic ("trap: %s at 0x%lx", tts, va);
		else
			panic ("trap: no debugger for \"%s\" (%d)", tts, type);
#endif
		break;

	case T_IBREAK | T_USER:
	case T_DBREAK | T_USER: {
		int code = TRAP_BRKPT;

		KERNEL_LOCK();
#ifdef PTRACE
		ss_clear_breakpoints(p);
		if (opcode == SSBREAKPOINT)
			code = TRAP_TRACE;
#endif
		/* pass to user debugger */
		trapsignal(p, SIGTRAP, type & ~T_USER, code, sv);
		KERNEL_UNLOCK();
		}
		break;

#ifdef PTRACE
	case T_TAKENBR | T_USER:
		KERNEL_LOCK();
		ss_clear_breakpoints(p);
		/* pass to user debugger */
		trapsignal(p, SIGTRAP, type & ~T_USER, TRAP_TRACE, sv);
		KERNEL_UNLOCK();
		break;
#endif

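	/*
	 * Floating point exception: find the first pending exception
	 * register in the saved FPU state, map it to an si_code for
	 * SIGFPE, then clear the exception registers and the trap bit.
	 */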
	case T_EXCEPTION | T_USER: {
		struct hppa_fpstate *hfp;
		u_int64_t *fpp;
		u_int32_t *pex;
		int i, flt;

		hfp = (struct hppa_fpstate *)frame->tf_cr30;
		fpp = (u_int64_t *)&hfp->hfp_regs;

		pex = (u_int32_t *)&fpp[0];
		for (i = 0, pex++; i < 7 && !*pex; i++, pex++);
		flt = 0;
		if (i < 7) {
			u_int32_t stat = HPPA_FPU_OP(*pex);
			if (stat & HPPA_FPU_UNMPL)
				flt = FPE_FLTINV;
			else if (stat & (HPPA_FPU_V << 1))
				flt = FPE_FLTINV;
			else if (stat & (HPPA_FPU_Z << 1))
				flt = FPE_FLTDIV;
			else if (stat & (HPPA_FPU_I << 1))
				flt = FPE_FLTRES;
			else if (stat & (HPPA_FPU_O << 1))
				flt = FPE_FLTOVF;
			else if (stat & (HPPA_FPU_U << 1))
				flt = FPE_FLTUND;
			/* still left: under/over-flow w/ inexact */

			/* cleanup exceptions (XXX deliver all ?) */
			while (i++ < 7)
				*pex++ = 0;
		}
		/* reset the trap flag, as if there was none */
		fpp[0] &= ~(((u_int64_t)HPPA_FPU_T) << 32);

		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, flt, sv);
		}
		break;

	case T_EMULATION:
		panic("trap: emulation trap in the kernel");
		break;

	case T_EMULATION | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGILL, type & ~T_USER, ILL_COPROC, sv);
		break;

	case T_OVERFLOW | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, FPE_INTOVF, sv);
		break;

	case T_CONDITION | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGFPE, type & ~T_USER, FPE_INTDIV, sv);
		break;

	case T_PRIV_OP | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVOPC, sv);
		break;

	case T_PRIV_REG | T_USER:
		/*
		 * On PCXS processors, attempting to read control registers
		 * cr26 and cr27 from userland causes a ``privileged register''
		 * trap.  Later processors do not restrict read accesses to
		 * these registers.
		 */
		if (cpu_type == hpcxs &&
		    (opcode & (0xfc1fffe0 | (0x1e << 21))) ==
		     (0x000008a0 | (0x1a << 21))) { /* mfctl %cr{26,27}, %r# */
			register_t cr;

			if (((opcode >> 21) & 0x1f) == 27)
				cr = frame->tf_cr27;	/* cr27 */
			else
				cr = 0;			/* cr26 */
			frame_regmap(frame, opcode & 0x1f) = cr;
			frame->tf_ipsw |= PSL_N;
		} else {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVREG, sv);
		}
		break;

		/* these should never get here */
	case T_HIGHERPL | T_USER:
	case T_LOWERPL | T_USER:
	case T_DATAPID | T_USER:
		sv.sival_int = va;
		trapsignal(p, SIGSEGV, access_type, SEGV_ACCERR, sv);
		break;

	/*
	 * On PCXS processors, traps T_DATACC, T_DATAPID and T_DATALIGN
	 * are shared.  We need to sort out the unaligned access situation
	 * first, before handling this trap as T_DATACC.
	 */
	case T_DPROT | T_USER:
		if (cpu_type == hpcxs) {
			if (pcxs_unaligned(opcode, va))
				goto datalign_user;
			else
				goto datacc;
		}

		sv.sival_int = va;
		trapsignal(p, SIGSEGV, access_type, SEGV_ACCERR, sv);
		break;

	case T_ITLBMISSNA:
	case T_ITLBMISSNA | T_USER:
	case T_DTLBMISSNA:
	case T_DTLBMISSNA | T_USER:
		if (space == HPPA_SID_KERNEL)
			map = kernel_map;
		else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		if ((opcode & 0xfc003fc0) == 0x04001340) {
			/* lpa failure case */
			frame_regmap(frame, opcode & 0x1f) = 0;
			frame->tf_ipsw |= PSL_N;
		} else if ((opcode & 0xfc001f80) == 0x04001180) {
			int pl;

			/* dig probe[rw]i? insns */
			if (opcode & 0x2000)
				pl = (opcode >> 16) & 3;
			else
				pl = frame_regmap(frame,
				    (opcode >> 16) & 0x1f) & 3;

			KERNEL_LOCK();

			if ((type & T_USER && space == HPPA_SID_KERNEL) ||
			    (frame->tf_iioq_head & 3) != pl ||
			    (type & T_USER && va >= VM_MAXUSER_ADDRESS) ||
			    uvm_fault(map, trunc_page(va), 0,
			     opcode & 0x40? PROT_WRITE : PROT_READ)) {
				frame_regmap(frame, opcode & 0x1f) = 0;
				frame->tf_ipsw |= PSL_N;
			}

			KERNEL_UNLOCK();
		} else if (type & T_USER) {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_ILLTRP, sv);
		} else
			panic("trap: %s @ 0x%lx:0x%lx for 0x%x:0x%lx irr 0x%08x",
			    tts, frame->tf_iisq_head, frame->tf_iioq_head,
			    space, va, opcode);
		break;

	case T_IPROT | T_USER:
	case T_TLB_DIRTY:
	case T_TLB_DIRTY | T_USER:
	case T_DATACC:
	case T_DATACC | T_USER:
datacc:
	case T_ITLBMISS:
	case T_ITLBMISS | T_USER:
	case T_DTLBMISS:
	case T_DTLBMISS | T_USER:
		if (type & T_USER) {
			if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
			    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
			    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
				goto out;
		}

		/*
		 * it could be a kernel map for exec_map faults
		 */
		if (space == HPPA_SID_KERNEL)
			map = kernel_map;
		else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		/*
		 * User faults outside the user address space always fail.
		 * This happens when va >= VM_MAXUSER_ADDRESS, where the
		 * space id would be zero and would cause misbehaviour
		 * further down in the code.
		 *
		 * Also check that the faulting space id matches curproc's.
		 */
		if ((type & T_USER && va >= VM_MAXUSER_ADDRESS) ||
		   (type & T_USER && map->pmap->pm_space != space)) {
			sv.sival_int = va;
			trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
			break;
		}

		KERNEL_LOCK();

		ret = uvm_fault(map, trunc_page(va), 0, access_type);

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if uvm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (ret == 0 && space != HPPA_SID_KERNEL &&
		    va < (vaddr_t)vm->vm_minsaddr)
			uvm_grow(p, va);

		KERNEL_UNLOCK();

		if (ret != 0) {
			if (type & T_USER) {
				int signal, sicode;

				signal = SIGSEGV;
				sicode = SEGV_MAPERR;
				if (ret == EACCES)
					sicode = SEGV_ACCERR;
				if (ret == EIO) {
					signal = SIGBUS;
					sicode = BUS_OBJERR;
				}
				sv.sival_int = va;
				trapsignal(p, signal, access_type, sicode, sv);
			} else {
				if (p && p->p_addr->u_pcb.pcb_onfault) {
					frame->tf_iioq_tail = 4 +
					    (frame->tf_iioq_head =
						p->p_addr->u_pcb.pcb_onfault);
#ifdef DDB
					frame->tf_iir = 0;
#endif
				} else {
					panic("trap: "
					    "uvm_fault(%p, %lx, 0, %d): %d",
					    map, va, access_type, ret);
				}
			}
		}
		break;

	case T_DATAPID:
		/* This should never happen, unless within spcopy() */
		if (p && p->p_addr->u_pcb.pcb_onfault) {
			frame->tf_iioq_tail = 4 +
			    (frame->tf_iioq_head =
				p->p_addr->u_pcb.pcb_onfault);
#ifdef DDB
			frame->tf_iir = 0;
#endif
		} else
			goto dead_end;
		break;

	case T_DATALIGN | T_USER:
datalign_user:
		sv.sival_int = va;
		trapsignal(p, SIGBUS, access_type, BUS_ADRALN, sv);
		break;

	case T_INTERRUPT:
	case T_INTERRUPT | T_USER:
		cpu_intr(frame);
		break;

	case T_CONDITION:
		panic("trap: divide by zero in the kernel");
		break;

	case T_ILLEGAL:
	case T_ILLEGAL | T_USER:
		/* see if it's a SPOP1,,0 */
		if ((opcode & 0xfffffe00) == 0x10000200) {
			frame_regmap(frame, opcode & 0x1f) = 0;
			frame->tf_ipsw |= PSL_N;
			break;
		}
		if (type & T_USER) {
			sv.sival_int = va;
			trapsignal(p, SIGILL, type & ~T_USER, ILL_ILLOPC, sv);
			break;
		}
		/* FALLTHROUGH */

	/*
	 * On PCXS processors, traps T_DATACC, T_DATAPID and T_DATALIGN
	 * are shared.  We need to sort out the unaligned access situation
	 * first, before handling this trap as T_DATACC.
	 */
	case T_DPROT:
		if (cpu_type == hpcxs) {
			if (pcxs_unaligned(opcode, va))
				goto dead_end;
			else
				goto datacc;
		}
		/* FALLTHROUGH to unimplemented */

	case T_LOWERPL:
	case T_IPROT:
	case T_OVERFLOW:
	case T_HIGHERPL:
	case T_TAKENBR:
	case T_POWERFAIL:
	case T_LPMC:
	case T_PAGEREF:
		/* FALLTHROUGH to unimplemented */
	default:
#ifdef TRAPDEBUG
		if (db_ktrap(type, va, frame))
			return;
#endif
		panic("trap: unimplemented \'%s\' (%d)", tts, trapnum);
	}

#ifdef DIAGNOSTIC
	if (curcpu()->ci_cpl != oldcpl)
		printf("WARNING: SPL (%d) NOT LOWERED ON "
		    "TRAP (%d) EXIT\n", curcpu()->ci_cpl, trapnum);
#endif

	if (trapnum != T_INTERRUPT)
		splx(curcpu()->ci_cpl);	/* process softints */

	/*
	 * If we were interrupted while on the syscall gateway page, do
	 * not consider this as running user code anymore, since weird
	 * things start to happen on return to userland; see also the
	 * note in locore.S:TLABEL(all).
	 */
	if ((type & T_USER) && !(frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    (frame->tf_iioq_head & ~PAGE_MASK) == SYSCALLGATE)) {
		ast(p);
out:
		userret(p);
	}
}

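/*
 * First return of a newly forked child: make the trapframe look like
 * a successful fork() return in the child, then finish through
 * mi_child_return().
 */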
void
child_return(void *arg)
{
	struct proc *p = (struct proc *)arg;
	struct trapframe *tf = p->p_md.md_regs;

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = 0;
	tf->tf_ret1 = 1;	/* ischild */
	tf->tf_t1 = 0;		/* errno */

	KERNEL_UNLOCK();

	ast(p);

	mi_child_return(p);
}

#ifdef PTRACE

#include <sys/ptrace.h>

int	ss_get_value(struct proc *p, vaddr_t addr, u_int *value);
int	ss_put_value(struct proc *p, vaddr_t addr, u_int value);

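/*
 * Read an instruction word from the traced process' text at `addr'.
 */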
int
ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
{
	struct uio uio;
	struct iovec iov;

	iov.iov_base = (caddr_t)value;
	iov.iov_len = sizeof(u_int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = curproc;
	return (process_domem(curproc, p->p_p, &uio, PT_READ_I));
}

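/*
 * Write an instruction word into the traced process' text at `addr'.
 */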
int
ss_put_value(struct proc *p, vaddr_t addr, u_int value)
{
	struct uio uio;
	struct iovec iov;

	iov.iov_base = (caddr_t)&value;
	iov.iov_len = sizeof(u_int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_procp = curproc;
	return (process_domem(curproc, p->p_p, &uio, PT_WRITE_I));
}

void
ss_clear_breakpoints(struct proc *p)
{
	/* Restore the original instructions. */
	if (p->p_md.md_bpva != 0) {
		ss_put_value(p, p->p_md.md_bpva, p->p_md.md_bpsave[0]);
		ss_put_value(p, p->p_md.md_bpva + 4, p->p_md.md_bpsave[1]);
		p->p_md.md_bpva = 0;
	}
}

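/*
 * Enable or disable single stepping of process `p'.  Stepping is done
 * by planting breakpoint instructions over the next instructions to
 * be executed.
 */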
int
process_sstep(struct proc *p, int sstep)
{
	int error;

	ss_clear_breakpoints(p);

	if (sstep == 0) {
		p->p_md.md_regs->tf_ipsw &= ~PSL_T;
		return (0);
	}

	/*
	 * Don't touch the syscall gateway page.  Instead, insert a
	 * breakpoint where we're supposed to return.
	 */
	if ((p->p_md.md_regs->tf_iioq_tail & ~PAGE_MASK) == SYSCALLGATE)
		p->p_md.md_bpva = p->p_md.md_regs->tf_r31 & ~HPPA_PC_PRIV_MASK;
	else
		p->p_md.md_bpva = p->p_md.md_regs->tf_iioq_tail & ~HPPA_PC_PRIV_MASK;

	/*
	 * Insert two breakpoint instructions; the first one might be
	 * nullified.  Of course we need to save the two original
	 * instructions first.
	 */

	error = ss_get_value(p, p->p_md.md_bpva, &p->p_md.md_bpsave[0]);
	if (error)
		return (error);
	error = ss_get_value(p, p->p_md.md_bpva + 4, &p->p_md.md_bpsave[1]);
	if (error)
		return (error);

	error = ss_put_value(p, p->p_md.md_bpva, SSBREAKPOINT);
	if (error)
		return (error);
	error = ss_put_value(p, p->p_md.md_bpva + 4, SSBREAKPOINT);
	if (error)
		return (error);

	if ((p->p_md.md_regs->tf_iioq_tail & ~PAGE_MASK) != SYSCALLGATE)
		p->p_md.md_regs->tf_ipsw |= PSL_T;
	else
		p->p_md.md_regs->tf_ipsw &= ~PSL_T;

	return (0);
}

#endif	/* PTRACE */

void	syscall(struct trapframe *frame);

/*
 * call actual syscall routine
 */
void
syscall(struct trapframe *frame)
{
	register struct proc *p = curproc;
	register const struct sysent *callp;
	int retq, nsys, code, argsize, argoff, error;
	register_t args[8], rval[2];
#ifdef DIAGNOSTIC
	int oldcpl = curcpu()->ci_cpl;
#endif

	uvmexp.syscalls++;

	if (!USERMODE(frame->tf_iioq_head))
		panic("syscall");

	p->p_md.md_regs = frame;
	nsys = p->p_p->ps_emul->e_nsysent;
	callp = p->p_p->ps_emul->e_sysent;

	argoff = 4; retq = 0;
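	/*
	 * The syscall number arrives in %t1.  The indirect syscall(2)
	 * and __syscall(2) forms pass the real number as their first
	 * argument and shift the remaining arguments down.
	 */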
	switch (code = frame->tf_t1) {
	case SYS_syscall:
		code = frame->tf_arg0;
		args[0] = frame->tf_arg1;
		args[1] = frame->tf_arg2;
		args[2] = frame->tf_arg3;
		argoff = 3;
		break;
	case SYS___syscall:
		if (callp != sysent)
			break;
		/*
		 * this works, because quads get magically swapped
		 * due to the args being laid backwards on the stack
		 * and then copied in words
		 */
		code = frame->tf_arg0;
		args[0] = frame->tf_arg2;
		args[1] = frame->tf_arg3;
		argoff = 2;
		retq = 1;
		break;
	default:
		args[0] = frame->tf_arg0;
		args[1] = frame->tf_arg1;
		args[2] = frame->tf_arg2;
		args[3] = frame->tf_arg3;
		break;
	}

	if (code < 0 || code >= nsys)
		callp += p->p_p->ps_emul->e_nosys;	/* bad syscall # */
	else
		callp += code;

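	/*
	 * Arguments beyond those passed in the four argument registers
	 * are fetched from the user stack, where they are laid out in
	 * reverse order.
	 */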
	if ((argsize = callp->sy_argsize)) {
		register_t *s, *e, t;
		int i;

		argsize -= argoff * 4;
		if (argsize > 0) {
			i = argsize / 4;
			if ((error = copyin((void *)(frame->tf_sp +
			    HPPA_FRAME_ARG(4 + i - 1)), args + argoff,
			    argsize)))
				goto bad;
			/* reverse the args[] entries */
			s = args + argoff;
			e = s + i - 1;
			while (s < e) {
				t = *s;
				*s = *e;
				*e = t;
				s++, e--;
			}
		}

		/*
		 * System calls with 64-bit arguments need a word swap
		 * due to the order of the arguments on the stack.
		 */
		i = 0;
		switch (code) {
		case SYS_lseek:		retq = 0;
		case SYS_truncate:
		case SYS_ftruncate:	i = 2;	break;
		case SYS_preadv:
		case SYS_pwritev:
		case SYS_pread:
		case SYS_pwrite:	i = 4;	break;
		case SYS_mquery:
		case SYS_mmap:		i = 6;	break;
		}

		if (i) {
			t = args[i];
			args[i] = args[i + 1];
			args[i + 1] = t;
		}
	}

	rval[0] = 0;
	rval[1] = frame->tf_ret1;

	error = mi_syscall(p, code, callp, args, rval);

	switch (error) {
	case 0:
		frame->tf_ret0 = rval[0];
		frame->tf_ret1 = rval[!retq];
		frame->tf_t1 = 0;
		break;
	case ERESTART:
		frame->tf_iioq_head -= 12;
		frame->tf_iioq_tail -= 12;
	case EJUSTRETURN:
		break;
	default:
	bad:
		frame->tf_t1 = error;
		frame->tf_ret0 = error;
		frame->tf_ret1 = 0;
		break;
	}

	ast(p);

	mi_syscall_return(p, code, error, rval);

#ifdef DIAGNOSTIC
	if (curcpu()->ci_cpl != oldcpl) {
		printf("WARNING: SPL (0x%x) NOT LOWERED ON "
		    "syscall(0x%x, 0x%lx, 0x%lx, 0x%lx...) EXIT, PID %d\n",
		    curcpu()->ci_cpl, code, args[0], args[1], args[2],
		    p->p_p->ps_pid);
		curcpu()->ci_cpl = oldcpl;
	}
#endif
	splx(curcpu()->ci_cpl);	/* process softints */
}

/*
 * Decide if opcode `opcode' accessing virtual address `va' caused an
 * unaligned trap. Returns zero if the access is correctly aligned.
 * Used on PCXS processors to sort out exception causes.
 */
int
pcxs_unaligned(u_int opcode, vaddr_t va)
{
	u_int mbz_bits;

	/*
	 * Exit early if the va is obviously aligned enough.
	 */
	if ((va & 0x0f) == 0)
		return 0;

	mbz_bits = 0;

	/*
	 * Only load and store instructions can cause unaligned access.
	 * There are three opcode patterns to look for:
	 * - canonical load/store
	 * - load/store short or indexed
	 * - coprocessor load/store
	 */

	if ((opcode & 0xd0000000) == 0x40000000) {
		switch ((opcode >> 26) & 0x03) {
		case 0x00:	/* ldb, stb */
			mbz_bits = 0x00;
			break;
		case 0x01:	/* ldh, sth */
			mbz_bits = 0x01;
			break;
		case 0x02:	/* ldw, stw */
		case 0x03:	/* ldwm, stwm */
			mbz_bits = 0x03;
			break;
		}
	} else

	if ((opcode & 0xfc000000) == 0x0c000000) {
		switch ((opcode >> 6) & 0x0f) {
		case 0x01:	/* ldhx, ldhs */
			mbz_bits = 0x01;
			break;
		case 0x02:	/* ldwx, ldws */
			mbz_bits = 0x03;
			break;
		case 0x07:	/* ldcwx, ldcws */
			mbz_bits = 0x0f;
			break;
		case 0x09:
			if ((opcode & (1 << 12)) != 0)	/* sths */
				mbz_bits = 0x01;
			break;
		case 0x0a:
			if ((opcode & (1 << 12)) != 0)	/* stws */
				mbz_bits = 0x03;
			break;
		}
	} else

	if ((opcode & 0xf4000000) == 0x24000000) {
		if ((opcode & (1 << 27)) != 0) {
			/* cldwx, cstwx, cldws, cstws */
			mbz_bits = 0x03;
		} else {
			/* clddx, cstdx, cldds, cstds */
			mbz_bits = 0x07;
		}
	}

	return (va & mbz_bits);
}