/*
 * Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
 * All rights reserved. Distributed under the terms of the MIT License.
 *
 * Copyright 2003, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#include <asm_defs.h>

/*	General exception handling concept:

	The PPC architecture specifies entry point offsets for the various
	exceptions in the first two physical pages. We put a short piece of code
	(VEC_ENTRY()) into each exception vector. It calls exception_vector_common,
	which is defined in the unused space at the beginning of the first physical
	page. It re-enables address translation and calls ppc_exception_tail, which
	lies in the kernel. That code dumps an iframe and invokes
	ppc_exception_entry() (arch_int.cpp), which handles the exception and
	eventually returns. The registers are then restored from the iframe and we
	return from the interrupt.

	algorithm overview:

	* VEC_ENTRY
	* exception_vector_common
	* ppc_exception_tail
		- dump iframe
		- ppc_exception_entry()
		- restore registers and return from interrupt

	Here we use the following SPRG registers, which are at the disposal of the
	operating system:
	* SPRG0: Physical address pointer to a struct cpu_exception_context
			 for the current CPU. The structure contains helpful pointers
			 as well as some scratch memory for temporarily saving registers.
	* SPRG1: Scratch.

	struct cpu_exception_context (defined in arch_int.h):
	offset 0:  virtual address of the exception handler routine in the kernel
	offset 4:  virtual address of the exception context
	offset 8:  kernel stack for the current thread
	offset 12: start of scratch memory for saving registers etc.
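
	A rough C sketch of that layout (hedged; the field names are
	illustrative, the authoritative definition is the one in arch_int.h):

		struct cpu_exception_context {
			void	*kernel_handle_exception;	// offset 0
			void	*exception_context;			// offset 4
			void	*kernel_stack;				// offset 8
			uint32	scratch[8];					// offset 12; the slots at
												// 16/20/24/28 hold r0/LR/r2/r3
												// in the code below
		};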

	algorithm in detail:

	* VEC_ENTRY
		- save r1 in SPRG1 and load cpu_exception_context into r1
		- save r0, save LR in r0
	* exception_vector_common
		- params:
			. r0: old LR
			. r1: exception context (physical address)
			. SPRG1: original r1
		- save r0-r3
		- load virtual exception context address to r1
		- turn on BAT for exception vector code
		- turn on address translation
		- get exception vector offset from LR
	* ppc_exception_tail
		- params:
			. r1: exception context (virtual address)
			. r3: exception vector offset
			. SPRG1: original r1
		- turn off BAT
		- get kernel stack pointer
		- dump iframe
		- ppc_exception_entry()
		- restore registers and return from interrupt
 */


/* exception vector definitions */

/* code in each exception vector */
#define VEC_ENTRY() \
	mtsprg1	%r1					; /* temporarily save r1 in SPRG1 */		\
	mfsprg0	%r1					; /* ppc_cpu_exception_context* -> r1 */	\
	stw		%r0, 16(%r1)		; /* save r0 */								\
	mflr	%r0					; /* save LR in r0 */						\
	bl		exception_vector_common	; /* continue with the common part */

/* defines an exception vector */
#define DEFINE_VECTOR(offset, name) 	\
.skip	offset - (. - __irqvec_start);	\
FUNCTION(name):							\
	VEC_ENTRY()
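
/* For illustration, DEFINE_VECTOR(0x300, DSI_exception) expands to roughly
   the following (FUNCTION() comes from asm_defs.h and essentially adds
   symbol decoration):

	.skip	0x300 - (. - __irqvec_start)
	DSI_exception:
		mtsprg1	%r1
		mfsprg0	%r1
		stw		%r0, 16(%r1)
		mflr	%r0
		bl		exception_vector_common
 */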


.global __irqvec_start
__irqvec_start:
	.long	0

/* Called by the exception vector code.
 * LR:    Points to the end of the exception vector code we're coming from.
 * r0:    original LR
 * r1:    ppc_cpu_exception_context* (physical address)
 * SPRG1: original r1
 */
exception_vector_common:
	stw		%r0, 20(%r1)			/* save original LR */
	stw		%r2, 24(%r1)			/* save r2 */
	stw		%r3, 28(%r1)			/* save r3 */

	/* load the virtual address of the ppc_cpu_exception_context for this CPU */
	lwz		%r1, 4(%r1)

	/* Address translation is turned off. We map this code via BAT, turn on
	   address translation, and continue in the kernel proper. */
	li		%r0, 0x10|0x2			/* BATL_MC | BATL_PP_RW */
	mtibatl	0, %r0					/* load lower word of the instruction BAT */
	li		%r0, 0x2				/* BEPI = 0, BL = 0 (128 KB), BATU_VS */
	mtibatu	0, %r0					/* load upper word of the instruction BAT */
	isync
	sync
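
	/* For reference, a sketch of the 60x BAT word layout assumed here
	   (consult the processor manual before relying on it): the upper word
	   holds BEPI (the effective page index), the block length BL, and the
	   Vs/Vp valid bits; the lower word holds BRPN (the physical page
	   number), the WIMG storage attributes, and the PP protection bits.
	   With BEPI, BL and BRPN all zero, this maps a 128 KB block at
	   effective address 0 straight to physical address 0, which is where
	   the exception vectors and this code live. */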

	/* turn on address translation */
	mfsrr1	%r0						/* load saved msr */
	rlwinm	%r0, %r0, 28, 30, 31	/* extract mmu bits */
	mfmsr	%r3						/* load the current msr */
	rlwimi	%r3, %r0, 4, 26, 27		/* merge the mmu bits with the current msr */
	li		%r0, 1
	rlwimi	%r3, %r0, 13, 18, 18	/* turn on FPU, too */
	mtmsr	%r3						/* load new msr (turning the mmu back on) */
	isync
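
	/* How the bit twiddling above works (IBM bit numbering, bit 0 = MSB):
	   SRR1[26:27] hold the interrupted context's MSR[IR:DR]. The rlwinm
	   rotates them into r0[30:31]; the first rlwimi rotates them back by 4
	   and inserts them into the current MSR image at bits 26:27. The second
	   rlwimi rotates the constant 1 from bit 31 up to bit 18, which is
	   MSR[FP]. */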

	/* Get LR -- it points to the end of the exception vector code. We adjust it
	   to point to the beginning and can use it to identify the vector later. */
	mflr	%r3
	subi	%r3, %r3, 20		/* back up 5 instructions to the vector start */

	/* jump to kernel code (ppc_exception_tail) */
	lwz		%r2, 0(%r1)
	mtlr	%r2
	blr


DEFINE_VECTOR(0x100, system_reset_exception)
DEFINE_VECTOR(0x200, machine_check_exception)
DEFINE_VECTOR(0x300, DSI_exception)
DEFINE_VECTOR(0x400, ISI_exception)
DEFINE_VECTOR(0x500, external_interrupt_exception)
DEFINE_VECTOR(0x600, alignment_exception)
DEFINE_VECTOR(0x700, program_exception)
DEFINE_VECTOR(0x800, FP_unavailable_exception)
DEFINE_VECTOR(0x900, decrementer_exception)
DEFINE_VECTOR(0xc00, system_call_exception)
DEFINE_VECTOR(0xd00, trace_exception)
DEFINE_VECTOR(0xe00, FP_assist_exception)
DEFINE_VECTOR(0xf00, perf_monitor_exception)
DEFINE_VECTOR(0xf20, altivec_unavailable_exception)
DEFINE_VECTOR(0x1000, ITLB_miss_exception)
DEFINE_VECTOR(0x1100, DTLB_miss_on_load_exception)
DEFINE_VECTOR(0x1200, DTLB_miss_on_store_exception)
DEFINE_VECTOR(0x1300, instruction_address_breakpoint_exception)
DEFINE_VECTOR(0x1400, system_management_exception)
DEFINE_VECTOR(0x1600, altivec_assist_exception)
DEFINE_VECTOR(0x1700, thermal_management_exception)

.global __irqvec_end
__irqvec_end:


/* This is where exception_vector_common continues. We're in the kernel here.
   r1:    ppc_cpu_exception_context* (virtual address)
   r3:    exception vector offset
   SPRG1: original r1
 */
FUNCTION(ppc_exception_tail):
	/* turn off BAT */
	li		%r2, 0
	mtibatu	0, %r2
	mtibatl	0, %r2
	isync
	sync

	/* save CR */
	mfcr	%r0

	mfsrr1	%r2					/* load saved msr */
	andi.	%r2, %r2, (1 << 14)	/* test the saved MSR[PR] (problem state) bit */
	beq		.kernel				/* PR == 0: we were in kernel mode */

	/* We come from userland. Load the kernel stack top address for the current
	   userland thread. */
	mr		%r2, %r1
	lwz		%r1, 8(%r1)
	b		.restore_stack_end

.kernel:
	mr		%r2, %r1
	mfsprg1	%r1

.restore_stack_end:
	/* now r2 points to the ppc_cpu_exception_context, r1 to the kernel stack */
	/* restore the CR, it was messed up in the previous compare */
	mtcrf	0xff, %r0

	/* align r1 to 8 bytes, so the iframe will be aligned too */
	rlwinm	%r1, %r1, 0, 0, 28

	/* save the registers */
	bl		__save_regs

	/* iframe pointer to r4 and a backup to r20 */
	mr		%r4, %r1
	mr		%r20, %r1

	/* adjust the stack pointer for ABI compatibility */
	subi	%r1, %r1, 8				/* make sure there's space for the previous
									   frame pointer and the return address */
	rlwinm	%r1, %r1, 0, 0, 27		/* 16 byte align the stack pointer */
	li		%r0, 0
	stw		%r0, 0(%r1)				/* previous frame pointer: NULL */
		/* 4(%r1) is room for the return address to be filled in by the
		   called function. */
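
	/* This mimics a minimal SVR4 PPC stack frame (a hedged reading of the
	   ABI): the stack pointer is 16-byte aligned, 0(%r1) holds the back
	   chain (NULL terminates it for stack walkers), and the callee saves
	   its return address in the LR save word at 4(%r1). That is all
	   ppc_exception_entry() needs in order to run as ordinary C++ code. */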

	/* r3: exception vector offset
	   r4: iframe pointer */
	bl 		ppc_exception_entry

	/* move the iframe to r1 */
	mr		%r1, %r20

	b		__restore_regs_and_rfi


/* called by ppc_exception_tail
 * register expectations:
 *  r1:         stack
 *  r2:         ppc_cpu_exception_context*
 *  SPRG1:      original r1
 *  r0, r3, LR: scrambled, but saved in scratch memory
 * all other regs should have been unmodified by the exception handler,
 * and ready to be saved
 */
__save_regs:
	/* Note: The iframe must be 8 byte aligned. The stack pointer we are passed
	   in r1 is aligned. So we store the floating point registers first and
	   need to take care that an even number of 4 byte registers is stored,
	   inserting padding where necessary. */
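
	/* Spelled out: the 32 doubles (256 bytes) sit at the aligned top of the
	   frame, followed below by 42 words (r0-r31, FPSCR, CTR, XER, CR, LR,
	   DSISR, DAR, SRR1, SRR0 and the vector offset). 42 words are 168
	   bytes, a multiple of 8, so the final r1, i.e. the iframe pointer,
	   stays 8 byte aligned as well. */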

	/* push f0-f31 */
	stfdu	%f0, -8(%r1)
	stfdu	%f1, -8(%r1)
	stfdu	%f2, -8(%r1)
	stfdu	%f3, -8(%r1)
	stfdu	%f4, -8(%r1)
	stfdu	%f5, -8(%r1)
	stfdu	%f6, -8(%r1)
	stfdu	%f7, -8(%r1)
	stfdu	%f8, -8(%r1)
	stfdu	%f9, -8(%r1)
	stfdu	%f10, -8(%r1)
	stfdu	%f11, -8(%r1)
	stfdu	%f12, -8(%r1)
	stfdu	%f13, -8(%r1)
	stfdu	%f14, -8(%r1)
	stfdu	%f15, -8(%r1)
	stfdu	%f16, -8(%r1)
	stfdu	%f17, -8(%r1)
	stfdu	%f18, -8(%r1)
	stfdu	%f19, -8(%r1)
	stfdu	%f20, -8(%r1)
	stfdu	%f21, -8(%r1)
	stfdu	%f22, -8(%r1)
	stfdu	%f23, -8(%r1)
	stfdu	%f24, -8(%r1)
	stfdu	%f25, -8(%r1)
	stfdu	%f26, -8(%r1)
	stfdu	%f27, -8(%r1)
	stfdu	%f28, -8(%r1)
	stfdu	%f29, -8(%r1)
	stfdu	%f30, -8(%r1)
	stfdu	%f31, -8(%r1)

	/* push r0-r3 */
	lwz		%r0, 16(%r2)		/* original r0 */
	stwu	%r0, -4(%r1)		/* push r0 */
	mfsprg1	%r0					/* original r1 */
	stwu	%r0, -4(%r1)		/* push r1 */
	lwz		%r0, 24(%r2)		/* original r2 */
	stwu	%r0, -4(%r1)		/* push r2 */
	lwz		%r0, 28(%r2)		/* original r3 */
	stwu	%r0, -4(%r1)		/* push r3 */

	/* push r4-r31 */
	stwu	%r4, -4(%r1)
	stwu	%r5, -4(%r1)
	stwu	%r6, -4(%r1)
	stwu	%r7, -4(%r1)
	stwu	%r8, -4(%r1)
	stwu	%r9, -4(%r1)
	stwu	%r10, -4(%r1)
	stwu	%r11, -4(%r1)
	stwu	%r12, -4(%r1)
	stwu	%r13, -4(%r1)
	stwu	%r14, -4(%r1)
	stwu	%r15, -4(%r1)
	stwu	%r16, -4(%r1)
	stwu	%r17, -4(%r1)
	stwu	%r18, -4(%r1)
	stwu	%r19, -4(%r1)
	stwu	%r20, -4(%r1)
	stwu	%r21, -4(%r1)
	stwu	%r22, -4(%r1)
	stwu	%r23, -4(%r1)
	stwu	%r24, -4(%r1)
	stwu	%r25, -4(%r1)
	stwu	%r26, -4(%r1)
	stwu	%r27, -4(%r1)
	stwu	%r28, -4(%r1)
	stwu	%r29, -4(%r1)
	stwu	%r30, -4(%r1)
	stwu	%r31, -4(%r1)

	/* save some of the other regs */
	mffs	%f0
	stfsu	%f0, -4(%r1)		/* push FPSCR */
	mfctr	%r0
	stwu	%r0, -4(%r1)		/* push CTR */
	mfxer	%r0
	stwu	%r0, -4(%r1)		/* push XER */
	mfcr	%r0
	stwu	%r0, -4(%r1)		/* push CR */
	lwz		%r0, 20(%r2)		/* original LR */
	stwu	%r0, -4(%r1)		/* push LR */
	mfspr	%r0, %dsisr
	stwu	%r0, -4(%r1)		/* push DSISR */
	mfspr	%r0, %dar
	stwu	%r0, -4(%r1)		/* push DAR */
	mfspr	%r0, %srr1
	stwu	%r0, -4(%r1)		/* push SRR1 */
	mfspr	%r0, %srr0
	stwu	%r0, -4(%r1)		/* push SRR0 */

	stwu	%r3, -4(%r1)		/* exception vector offset */

	blr
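

/* The resulting iframe, read from the final r1 upward (a hedged C sketch;
   the field names are illustrative, the kernel's actual definition is the
   iframe structure in the arch headers):

	struct iframe_sketch {
		uint32	vector;			// exception vector offset
		uint32	srr0;			// interrupted PC
		uint32	srr1;			// interrupted MSR
		uint32	dar, dsisr;
		uint32	lr, cr, xer, ctr;
		uint32	fpscr;
		uint32	gpr[32];		// gpr[0] == r31 ... gpr[31] == r0
		double	fpr[32];		// fpr[0] == f31 ... fpr[31] == f0
	};
 */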


/* called at the tail end of each of the exceptions
 * r1: iframe pointer
 */
__restore_regs_and_rfi:
	lwzu	%r0, 4(%r1)		/* SRR0 (skip vector offset) */
	mtspr	%srr0, %r0
	lwzu	%r0, 4(%r1)		/* SRR1 */
	mtspr	%srr1, %r0
	lwzu	%r0, 4(%r1)		/* DAR */
	mtspr	%dar, %r0
	lwzu	%r0, 4(%r1)		/* DSISR */
	mtspr	%dsisr, %r0
	lwzu	%r0, 4(%r1)		/* LR */
	mtlr	%r0
	lwzu	%r0, 4(%r1)		/* CR */
	mtcr	%r0
	lwzu	%r0, 4(%r1)		/* XER */
	mtxer	%r0
	lwzu	%r0, 4(%r1)		/* CTR */
	mtctr	%r0
	lfsu	%f0, 4(%r1)		/* FPSCR */
	mtfsf	0xff, %f0

	lwzu	%r31, 4(%r1)
	lwzu	%r30, 4(%r1)
	lwzu	%r29, 4(%r1)
	lwzu	%r28, 4(%r1)
	lwzu	%r27, 4(%r1)
	lwzu	%r26, 4(%r1)
	lwzu	%r25, 4(%r1)
	lwzu	%r24, 4(%r1)
	lwzu	%r23, 4(%r1)
	lwzu	%r22, 4(%r1)
	lwzu	%r21, 4(%r1)
	lwzu	%r20, 4(%r1)
	lwzu	%r19, 4(%r1)
	lwzu	%r18, 4(%r1)
	lwzu	%r17, 4(%r1)
	lwzu	%r16, 4(%r1)
	lwzu	%r15, 4(%r1)
	lwzu	%r14, 4(%r1)
	lwzu	%r13, 4(%r1)
	lwzu	%r12, 4(%r1)
	lwzu	%r11, 4(%r1)
	lwzu	%r10, 4(%r1)
	lwzu	%r9, 4(%r1)
	lwzu	%r8, 4(%r1)
	lwzu	%r7, 4(%r1)
	lwzu	%r6, 4(%r1)
	lwzu	%r5, 4(%r1)
	lwzu	%r4, 4(%r1)
	lwzu	%r3, 4(%r1)

	/* Stop before we overwrite r1; restore the floating point registers
	   first, using r2 to walk them. */
	addi	%r2, %r1, 16		/* skip the r3, r2, r1, r0 slots */

	/* f31-f0 */
	lfd		%f31, 0(%r2)
	lfdu	%f30, 8(%r2)
	lfdu	%f29, 8(%r2)
	lfdu	%f28, 8(%r2)
	lfdu	%f27, 8(%r2)
	lfdu	%f26, 8(%r2)
	lfdu	%f25, 8(%r2)
	lfdu	%f24, 8(%r2)
	lfdu	%f23, 8(%r2)
	lfdu	%f22, 8(%r2)
	lfdu	%f21, 8(%r2)
	lfdu	%f20, 8(%r2)
	lfdu	%f19, 8(%r2)
	lfdu	%f18, 8(%r2)
	lfdu	%f17, 8(%r2)
	lfdu	%f16, 8(%r2)
	lfdu	%f15, 8(%r2)
	lfdu	%f14, 8(%r2)
	lfdu	%f13, 8(%r2)
	lfdu	%f12, 8(%r2)
	lfdu	%f11, 8(%r2)
	lfdu	%f10, 8(%r2)
	lfdu	%f9, 8(%r2)
	lfdu	%f8, 8(%r2)
	lfdu	%f7, 8(%r2)
	lfdu	%f6, 8(%r2)
	lfdu	%f5, 8(%r2)
	lfdu	%f4, 8(%r2)
	lfdu	%f3, 8(%r2)
	lfdu	%f2, 8(%r2)
	lfdu	%f1, 8(%r2)
	lfd		%f0, 8(%r2)

	/* r2-r0 */
	lwzu	%r2, 4(%r1)
	lwz		%r0, 8(%r1)
	lwz		%r1, 4(%r1)

	/* return from interrupt */
	rfi
439