1/*	$NetBSD$	*/
2
3/*-
4 * Copyright (c) 1997 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD$");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/sa.h>
35#include <sys/lwp.h>
36#include <sys/savar.h>
37#include <sys/proc.h>
38#include <sys/ras.h>
39#include <sys/cpu.h>
40
41#include <sys/kernel.h>
42
43#include <uvm/uvm_extern.h>
44
45#include <machine/cpufunc.h>
46#include <machine/pcb.h>
47#include <machine/mcontext.h>
48#include <hppa/hppa/machdep.h>
49
/* the following is used externally (sysctl_hw) */
/* Architecture name reported to userland via sysctl hw.machine_arch. */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
52
53/*
54 * XXX fredette - much of the TLB trap handler setup should
55 * probably be moved here from hp700/hp700/machdep.c, seeing
56 * that there's related code already in hppa/hppa/trap.S.
57 */
58
59
60/*
61 * Scheduler activations upcall frame.  Pushed onto user stack before
62 * calling an SA upcall.
63 */
64
struct saframe {
	/* first 4 arguments passed in registers on entry to upcallcode */
	void *		sa_arg;		/* arg4; NOTE(review): presumably
					 * fetched from the stack by the
					 * userland upcall stub — confirm */
	int		sa_interrupted;	/* arg3 */
	int		sa_events;	/* arg2 */
	struct sa_t **	sa_sas;		/* arg1 */
	int		sa_type;	/* arg0 */
};
73
/*
 * cpu_upcall:
 *
 *      Send an upcall to userland: build a struct saframe on the user
 *      stack and rewrite the trapframe so the LWP resumes in the upcall
 *      handler with arg0..arg3 in registers.  On any copyin/copyout
 *      failure the process is killed with SIGILL.
 */

void
cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted,
	   void *sas, void *ap, void *sp, sa_upcall_t upcall)
{
	struct saframe *sf, frame;
	struct proc *p = l->l_proc;
	struct trapframe *tf;
	uintptr_t upva;
	vaddr_t va;

	tf = (struct trapframe *)l->l_md.md_regs;

	/* Build the frame image to be copied out to the user stack. */
	frame.sa_type = type;
	frame.sa_sas = sas;
	frame.sa_events = nevents;
	frame.sa_interrupted = ninterrupted;
	frame.sa_arg = ap;

	/*
	 * Make this LWP's address space current before touching user
	 * memory with copyout/copyin below.
	 */
	pmap_activate(l);
	/*
	 * hppa stacks grow towards higher addresses, hence sp + size.
	 * NOTE(review): the magic 32 below is presumably the HPPA frame
	 * marker area — confirm against HPPA_FRAME_SIZE/<machine/frame.h>.
	 */
	va = HPPA_FRAME_ROUND((uintptr_t)sp + sizeof(frame) + HPPA_FRAME_SIZE);
	sf = (void *)(va - 32 - sizeof(frame));
	if (copyout(&frame, sf, sizeof(frame)) != 0) {
		/* Copying onto the stack didn't work. Die. */
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Deal with the upcall function pointer being a PLABEL.
	 *
	 * If bit 1 is set, `upcall' is a procedure label: word 0 of the
	 * descriptor is the real entry point and word 1 (offset 4) is
	 * the linkage value loaded into tf_t4 (which cpu_getmcontext
	 * exports as gr[19]).
	 */

	upva = (uintptr_t)upcall;
	if (upva & 2) {
		upva &= ~3;	/* strip the PLABEL marker bits */
		if (copyin((void *)(upva + 4), &tf->tf_t4, 4)) {
			printf("copyin t4 failed\n");
			mutex_enter(p->p_lock);
			sigexit(l, SIGILL);
			/* NOTREACHED */
		}
		if (copyin((void *)upva, &upcall, 4)) {
			printf("copyin upcall failed\n");
			mutex_enter(p->p_lock);
			sigexit(l, SIGILL);
			/* NOTREACHED */
		}
	}

	/* Resume in userland at the upcall entry point. */
	tf->tf_iioq_head = (uintptr_t)upcall | HPPA_PC_PRIV_USER;
	tf->tf_iioq_tail = tf->tf_iioq_head + 4;

	/* New stack pointer plus the first four arguments in registers. */
	tf->tf_sp = va;
	tf->tf_arg0 = type;
	tf->tf_arg1 = (uintptr_t)sas;
	tf->tf_arg2 = nevents;
	tf->tf_arg3 = ninterrupted;
	tf->tf_rp = 0;	/* clear the user return pointer */
}
139
140void
141cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
142{
143	struct trapframe *tf = l->l_md.md_regs;
144	struct pcb *pcb = lwp_getpcb(l);
145	__greg_t *gr = mcp->__gregs;
146	__greg_t ras_pc;
147
148	gr[0]  = tf->tf_ipsw;
149	gr[1]  = tf->tf_r1;
150	gr[2]  = tf->tf_rp;
151	gr[3]  = tf->tf_r3;
152	gr[4]  = tf->tf_r4;
153	gr[5]  = tf->tf_r5;
154	gr[6]  = tf->tf_r6;
155	gr[7]  = tf->tf_r7;
156	gr[8]  = tf->tf_r8;
157	gr[9]  = tf->tf_r9;
158	gr[10] = tf->tf_r10;
159	gr[11] = tf->tf_r11;
160	gr[12] = tf->tf_r12;
161	gr[13] = tf->tf_r13;
162	gr[14] = tf->tf_r14;
163	gr[15] = tf->tf_r15;
164	gr[16] = tf->tf_r16;
165	gr[17] = tf->tf_r17;
166	gr[18] = tf->tf_r18;
167	gr[19] = tf->tf_t4;
168	gr[20] = tf->tf_t3;
169	gr[21] = tf->tf_t2;
170	gr[22] = tf->tf_t1;
171	gr[23] = tf->tf_arg3;
172	gr[24] = tf->tf_arg2;
173	gr[25] = tf->tf_arg1;
174	gr[26] = tf->tf_arg0;
175	gr[27] = tf->tf_dp;
176	gr[28] = tf->tf_ret0;
177	gr[29] = tf->tf_ret1;
178	gr[30] = tf->tf_sp;
179	gr[31] = tf->tf_r31;
180
181	gr[_REG_SAR] = tf->tf_sar;
182	gr[_REG_PCSQH] = tf->tf_iisq_head;
183	gr[_REG_PCSQT] = tf->tf_iisq_tail;
184	gr[_REG_PCOQH] = tf->tf_iioq_head;
185	gr[_REG_PCOQT] = tf->tf_iioq_tail;
186	gr[_REG_SR0] = tf->tf_sr0;
187	gr[_REG_SR1] = tf->tf_sr1;
188	gr[_REG_SR2] = tf->tf_sr2;
189	gr[_REG_SR3] = tf->tf_sr3;
190	gr[_REG_SR4] = tf->tf_sr4;
191	gr[_REG_CR27] = tf->tf_cr27;
192#if 0
193	gr[_REG_CR26] = tf->tf_cr26;
194#endif
195
196	ras_pc = (__greg_t)ras_lookup(l->l_proc,
197	    (void *)(gr[_REG_PCOQH] & ~HPPA_PC_PRIV_MASK));
198	if (ras_pc != -1) {
199		ras_pc |= HPPA_PC_PRIV_USER;
200		gr[_REG_PCOQH] = ras_pc;
201		gr[_REG_PCOQT] = ras_pc + 4;
202	}
203
204	*flags |= _UC_CPU | _UC_TLSBASE;
205
206	if (l->l_md.md_flags & 0) {
207		return;
208	}
209
210	hppa_fpu_flush(l);
211	memcpy(&mcp->__fpregs, pcb->pcb_fpregs, sizeof(mcp->__fpregs));
212	*flags |= _UC_FPU;
213}
214
215int
216cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
217{
218	const __greg_t *gr = mcp->__gregs;
219
220	if ((gr[_REG_PSW] & (PSW_MBS|PSW_MBZ)) != PSW_MBS) {
221		return EINVAL;
222	}
223
224#if 0
225	/*
226	 * XXX
227	 * Force the space regs and priviledge bits to
228	 * the right values in the trapframe for now.
229	 */
230
231	if (gr[_REG_PCSQH] != pmap_sid(pmap, gr[_REG_PCOQH])) {
232		return EINVAL;
233	}
234
235	if (gr[_REG_PCSQT] != pmap_sid(pmap, gr[_REG_PCOQT])) {
236		return EINVAL;
237	}
238
239	if (gr[_REG_PCOQH] < 0xc0000020 &&
240	    (gr[_REG_PCOQH] & HPPA_PC_PRIV_MASK) != HPPA_PC_PRIV_USER) {
241		return EINVAL;
242	}
243
244	if (gr[_REG_PCOQT] < 0xc0000020 &&
245	    (gr[_REG_PCOQT] & HPPA_PC_PRIV_MASK) != HPPA_PC_PRIV_USER) {
246		return EINVAL;
247	}
248#endif
249
250	return 0;
251}
252
/*
 * cpu_setmcontext:
 *
 *	Install the machine context *mcp into LWP l, honouring the
 *	_UC_* selection bits in flags (_UC_CPU: general registers and
 *	PC, _UC_TLSBASE: thread pointer, _UC_FPU: FP registers, plus
 *	the signal-stack bookkeeping bits).  Returns 0 on success or
 *	EINVAL if the supplied context fails validation.
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	struct proc *p = l->l_proc;
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
	const __greg_t *gr = mcp->__gregs;
	int error;

	if ((flags & _UC_CPU) != 0) {
		/* Never install an unvalidated context. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/*
		 * NOTE(review): PSW_O is forced on when running on a
		 * PA2.0 CPU — presumably mandatory there; confirm
		 * against the PSW definition in <machine/psl.h>.
		 */
		tf->tf_ipsw	= gr[0] |
		    (hppa_cpu_ispa20_p() ? PSW_O : 0);
		tf->tf_r1	= gr[1];
		tf->tf_rp	= gr[2];
		tf->tf_r3	= gr[3];
		tf->tf_r4	= gr[4];
		tf->tf_r5	= gr[5];
		tf->tf_r6	= gr[6];
		tf->tf_r7	= gr[7];
		tf->tf_r8	= gr[8];
		tf->tf_r9	= gr[9];
		tf->tf_r10	= gr[10];
		tf->tf_r11	= gr[11];
		tf->tf_r12	= gr[12];
		tf->tf_r13	= gr[13];
		tf->tf_r14	= gr[14];
		tf->tf_r15	= gr[15];
		tf->tf_r16	= gr[16];
		tf->tf_r17	= gr[17];
		tf->tf_r18	= gr[18];
		tf->tf_t4	= gr[19];
		tf->tf_t3	= gr[20];
		tf->tf_t2	= gr[21];
		tf->tf_t1	= gr[22];
		tf->tf_arg3	= gr[23];
		tf->tf_arg2	= gr[24];
		tf->tf_arg1	= gr[25];
		tf->tf_arg0	= gr[26];
		tf->tf_dp	= gr[27];
		tf->tf_ret0	= gr[28];
		tf->tf_ret1	= gr[29];
		tf->tf_sp	= gr[30];
		tf->tf_r31	= gr[31];
		tf->tf_sar	= gr[_REG_SAR];
		/*
		 * The instruction space queues are derived from the
		 * process's pmap, not taken from the (untrusted)
		 * user-supplied context.
		 */
		tf->tf_iisq_head = pmap_sid(pmap, gr[_REG_PCOQH]);
		tf->tf_iisq_tail = pmap_sid(pmap, gr[_REG_PCOQT]);

		tf->tf_iioq_head = gr[_REG_PCOQH];
		tf->tf_iioq_tail = gr[_REG_PCOQT];

		/*
		 * Clamp the privilege bits of the PC offset queues.
		 * NOTE(review): 0xc0000020 is presumably the system
		 * gateway page (syscall entry), where the privilege
		 * bits must be stripped instead — confirm against the
		 * hppa syscall gate address.
		 */
		if (tf->tf_iioq_head >= 0xc0000020) {
			tf->tf_iioq_head &= ~HPPA_PC_PRIV_MASK;
		} else {
			tf->tf_iioq_head |= HPPA_PC_PRIV_USER;
		}
		if (tf->tf_iioq_tail >= 0xc0000020) {
			tf->tf_iioq_tail &= ~HPPA_PC_PRIV_MASK;
		} else {
			tf->tf_iioq_tail |= HPPA_PC_PRIV_USER;
		}

#if 0
		tf->tf_sr0	= gr[_REG_SR0];
		tf->tf_sr1	= gr[_REG_SR1];
		tf->tf_sr2	= gr[_REG_SR2];
		tf->tf_sr3	= gr[_REG_SR3];
		tf->tf_sr4	= gr[_REG_SR4];
		tf->tf_cr26	= gr[_REG_CR26];
#endif
	}

	/* Restore the private thread context */
	if (flags & _UC_TLSBASE) {
		lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_CR27]);
		tf->tf_cr27	= gr[_REG_CR27];
	}

	/* Restore the floating point registers */
	if ((flags & _UC_FPU) != 0) {
		struct pcb *pcb = lwp_getpcb(l);

		/* Flush live FPU state to the pcb before overwriting it. */
		hppa_fpu_flush(l);
		memcpy(pcb->pcb_fpregs, &mcp->__fpregs, sizeof(mcp->__fpregs));
	}

	/* Signal-stack state is protected by p_lock. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return 0;
}
351
352/*
353 * Do RAS processing.
354 */
355
356void
357hppa_ras(struct lwp *l)
358{
359	struct proc *p;
360	struct trapframe *tf;
361	intptr_t rasaddr;
362
363	p = l->l_proc;
364	tf = l->l_md.md_regs;
365	rasaddr = (intptr_t)ras_lookup(p, (void *)tf->tf_iioq_head);
366	if (rasaddr != -1) {
367		rasaddr |= HPPA_PC_PRIV_USER;
368		tf->tf_iioq_head = rasaddr;
369		tf->tf_iioq_tail = rasaddr + 4;
370	}
371}
372
373/*
374 * Preempt the current LWP if in interrupt from user mode,
375 * or after the current trap/syscall if in system mode.
376 */
377void
378cpu_need_resched(struct cpu_info *ci, int flags)
379{
380	bool immed = (flags & RESCHED_IMMED) != 0;
381
382	if (ci->ci_want_resched && !immed)
383		return;
384	ci->ci_want_resched = 1;
385	setsoftast(ci->ci_data.cpu_onproc);
386
387#ifdef MULTIPROCESSOR
388	if (ci->ci_curlwp != ci->ci_data.cpu_idlelwp) {
389		if (immed && ci != curcpu()) {
390			/* XXX send IPI */
391		}
392	}
393#endif
394}
395