1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/exception-64s.h>
16
17/*
18 * We lay out physical memory as follows:
19 * 0x0000 - 0x00ff : Secondary processor spin code
20 * 0x0100 - 0x2fff : pSeries Interrupt prologs
21 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
22 * 0x6000 - 0x6fff : Initial (CPU0) segment table
23 * 0x7000 - 0x7fff : FWNMI data area
24 * 0x8000 -        : Early init and support code
25 */
26
27/*
28 * This is the start of the interrupt handlers for pSeries
29 * This code runs with relocation off.
30 * Code from here to __end_interrupts gets copied down to real
31 * address 0x100 when we are running a relocatable kernel.
32 * Therefore any relative branches in this section must only
33 * branch to labels in this section.
34 */
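/*
 * Note that when CONFIG_RELOCATABLE is set, branches out of this region
 * cannot be simple relative branches; they go through LOAD_HANDLER plus
 * mtctr/bctr instead (see the SLB miss vectors below).
 */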
35	. = 0x100
36	.globl __start_interrupts
37__start_interrupts:
38
39	STD_EXCEPTION_PSERIES(0x100, system_reset)
40
41	. = 0x200
42_machine_check_pSeries:
43	HMT_MEDIUM
44	DO_KVM	0x200
45	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
46	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
47
48	. = 0x300
49	.globl data_access_pSeries
50data_access_pSeries:
51	HMT_MEDIUM
52	DO_KVM	0x300
53	mtspr	SPRN_SPRG_SCRATCH0,r13
54BEGIN_FTR_SECTION
55	mfspr	r13,SPRN_SPRG_PACA
56	std	r9,PACA_EXSLB+EX_R9(r13)
57	std	r10,PACA_EXSLB+EX_R10(r13)
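	/*
	 * Distinguish a segment-table miss on a kernel address from an
	 * ordinary data access fault: r10 gets the top nibble of the DAR
	 * (0xc for a kernel address) with the DSISR "no segment" bit
	 * folded in as 0x20, so 0x2c below means a kernel segment-table
	 * miss, handled by do_stab_bolted.
	 */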
58	mfspr	r10,SPRN_DAR
59	mfspr	r9,SPRN_DSISR
60	srdi	r10,r10,60
61	rlwimi	r10,r9,16,0x20
62	mfcr	r9
63	cmpwi	r10,0x2c
64	beq	do_stab_bolted_pSeries
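	/*
	 * Not a STAB miss: move the r9/r10 saves from the EXSLB save area
	 * over to EXGEN, save r11, r12 and the original r13 there as well,
	 * then continue with the normal data access prolog.
	 */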
65	ld	r10,PACA_EXSLB+EX_R10(r13)
66	std	r11,PACA_EXGEN+EX_R11(r13)
67	ld	r11,PACA_EXSLB+EX_R9(r13)
68	std	r12,PACA_EXGEN+EX_R12(r13)
69	mfspr	r12,SPRN_SPRG_SCRATCH0
70	std	r10,PACA_EXGEN+EX_R10(r13)
71	std	r11,PACA_EXGEN+EX_R9(r13)
72	std	r12,PACA_EXGEN+EX_R13(r13)
73	EXCEPTION_PROLOG_PSERIES_1(data_access_common)
74FTR_SECTION_ELSE
75	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
76ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
77
78	. = 0x380
79	.globl data_access_slb_pSeries
80data_access_slb_pSeries:
81	HMT_MEDIUM
82	DO_KVM	0x380
83	mtspr	SPRN_SPRG_SCRATCH0,r13
84	mfspr	r13,SPRN_SPRG_PACA		/* get paca address into r13 */
85	std	r3,PACA_EXSLB+EX_R3(r13)
86	mfspr	r3,SPRN_DAR
87	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
88	mfcr	r9
89#ifdef __DISABLED__
90	/* Keep that around for when we re-implement dynamic VSIDs */
91	cmpdi	r3,0
92	bge	slb_miss_user_pseries
93#endif /* __DISABLED__ */
94	std	r10,PACA_EXSLB+EX_R10(r13)
95	std	r11,PACA_EXSLB+EX_R11(r13)
96	std	r12,PACA_EXSLB+EX_R12(r13)
97	mfspr	r10,SPRN_SPRG_SCRATCH0
98	std	r10,PACA_EXSLB+EX_R13(r13)
99	mfspr	r12,SPRN_SRR1		/* and SRR1 */
100#ifndef CONFIG_RELOCATABLE
101	b	.slb_miss_realmode
102#else
103	/*
104	 * We can't just use a direct branch to .slb_miss_realmode
105	 * because the distance from here to there depends on where
106	 * the kernel ends up being put.
107	 */
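	/*
	 * So save CTR, build the handler's virtual address from PACAKBASE
	 * plus the handler offset (LOAD_HANDLER), and branch through CTR.
	 * .slb_miss_realmode restores CTR from r11 in the relocatable case.
	 */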
108	mfctr	r11
109	ld	r10,PACAKBASE(r13)
110	LOAD_HANDLER(r10, .slb_miss_realmode)
111	mtctr	r10
112	bctr
113#endif
114
115	STD_EXCEPTION_PSERIES(0x400, instruction_access)
116
117	. = 0x480
118	.globl instruction_access_slb_pSeries
119instruction_access_slb_pSeries:
120	HMT_MEDIUM
121	DO_KVM	0x480
122	mtspr	SPRN_SPRG_SCRATCH0,r13
123	mfspr	r13,SPRN_SPRG_PACA		/* get paca address into r13 */
124	std	r3,PACA_EXSLB+EX_R3(r13)
125	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
126	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
127	mfcr	r9
128#ifdef __DISABLED__
129	/* Keep that around for when we re-implement dynamic VSIDs */
130	cmpdi	r3,0
131	bge	slb_miss_user_pseries
132#endif /* __DISABLED__ */
133	std	r10,PACA_EXSLB+EX_R10(r13)
134	std	r11,PACA_EXSLB+EX_R11(r13)
135	std	r12,PACA_EXSLB+EX_R12(r13)
136	mfspr	r10,SPRN_SPRG_SCRATCH0
137	std	r10,PACA_EXSLB+EX_R13(r13)
138	mfspr	r12,SPRN_SRR1		/* and SRR1 */
139#ifndef CONFIG_RELOCATABLE
140	b	.slb_miss_realmode
141#else
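	/*
	 * As above: the kernel may be relocated, so reach .slb_miss_realmode
	 * indirectly through CTR rather than with a relative branch.
	 */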
142	mfctr	r11
143	ld	r10,PACAKBASE(r13)
144	LOAD_HANDLER(r10, .slb_miss_realmode)
145	mtctr	r10
146	bctr
147#endif
148
149	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
150	STD_EXCEPTION_PSERIES(0x600, alignment)
151	STD_EXCEPTION_PSERIES(0x700, program_check)
152	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
153	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
154	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
155	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
156
157	. = 0xc00
158	.globl	system_call_pSeries
159system_call_pSeries:
160	HMT_MEDIUM
161	DO_KVM	0xc00
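	/*
	 * On CPUs with CPU_FTR_REAL_LE, system call number 0x1ebe is the
	 * fast endianness-switch call: it is handled entirely at label 1:
	 * below by flipping MSR_LE in SRR1 and returning with rfid.
	 */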
162BEGIN_FTR_SECTION
163	cmpdi	r0,0x1ebe
164	beq-	1f
165END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
166	mr	r9,r13
167	mfspr	r13,SPRN_SPRG_PACA
168	mfspr	r11,SPRN_SRR0
169	ld	r12,PACAKBASE(r13)
170	ld	r10,PACAKMSR(r13)
171	LOAD_HANDLER(r12, system_call_entry)
172	mtspr	SPRN_SRR0,r12
173	mfspr	r12,SPRN_SRR1
174	mtspr	SPRN_SRR1,r10
175	rfid
176	b	.	/* prevent speculative execution */
177
178/* Fast LE/BE switch system call */
1791:	mfspr	r12,SPRN_SRR1
180	xori	r12,r12,MSR_LE
181	mtspr	SPRN_SRR1,r12
182	rfid		/* return to userspace */
183	b	.
184
185	STD_EXCEPTION_PSERIES(0xd00, single_step)
186	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
187
188	/* The Altivec unavailable exception lives at 0xf20, which falls in
189	 * the middle of the prolog code of the performance monitor exception
190	 * at 0xf00, so a little trickery is needed: each of these vectors
191	 * holds only a branch to its real prolog, placed after 0x3000.
192	 */
193performance_monitor_pSeries_1:
194	. = 0xf00
195	DO_KVM	0xf00
196	b	performance_monitor_pSeries
197
198altivec_unavailable_pSeries_1:
199	. = 0xf20
200	DO_KVM	0xf20
201	b	altivec_unavailable_pSeries
202
203vsx_unavailable_pSeries_1:
204	. = 0xf40
205	DO_KVM	0xf40
206	b	vsx_unavailable_pSeries
207
208#ifdef CONFIG_CBE_RAS
209	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
210#endif /* CONFIG_CBE_RAS */
211	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
212#ifdef CONFIG_CBE_RAS
213	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
214#endif /* CONFIG_CBE_RAS */
215	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
216#ifdef CONFIG_CBE_RAS
217	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
218#endif /* CONFIG_CBE_RAS */
219
220	. = 0x3000
221
222/*** pSeries interrupt support ***/
223
224	/* moved from 0xf00 */
225	STD_EXCEPTION_PSERIES(., performance_monitor)
226	STD_EXCEPTION_PSERIES(., altivec_unavailable)
227	STD_EXCEPTION_PSERIES(., vsx_unavailable)
228
229/*
230 * An interrupt came in while soft-disabled; clear EE in SRR1,
231 * clear paca->hard_enabled and return.
232 */
233masked_interrupt:
234	stb	r10,PACAHARDIRQEN(r13)
235	mtcrf	0x80,r9
236	ld	r9,PACA_EXGEN+EX_R9(r13)
237	mfspr	r10,SPRN_SRR1
238	rldicl	r10,r10,48,1		/* clear MSR_EE */
239	rotldi	r10,r10,16		/* and rotate it back into place */
240	mtspr	SPRN_SRR1,r10
241	ld	r10,PACA_EXGEN+EX_R10(r13)
242	mfspr	r13,SPRN_SPRG_SCRATCH0
243	rfid
244	b	.
245
246	.align	7
247do_stab_bolted_pSeries:
248	std	r11,PACA_EXSLB+EX_R11(r13)
249	std	r12,PACA_EXSLB+EX_R12(r13)
250	mfspr	r10,SPRN_SPRG_SCRATCH0
251	std	r10,PACA_EXSLB+EX_R13(r13)
252	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted)
253
254#ifdef CONFIG_PPC_PSERIES
255/*
256 * Vectors for the FWNMI option.  Share common code.
257 */
258	.globl system_reset_fwnmi
259      .align 7
260system_reset_fwnmi:
261	HMT_MEDIUM
262	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
263	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
264
265	.globl machine_check_fwnmi
266      .align 7
267machine_check_fwnmi:
268	HMT_MEDIUM
269	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
270	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
271
272#endif /* CONFIG_PPC_PSERIES */
273
274#ifdef __DISABLED__
275/*
276 * This is used when the SLB miss handler has to go virtual, which
277 * doesn't currently happen, but will once we re-implement dynamic
278 * VSIDs for shared page tables.
279 */
280slb_miss_user_pseries:
281	std	r10,PACA_EXGEN+EX_R10(r13)
282	std	r11,PACA_EXGEN+EX_R11(r13)
283	std	r12,PACA_EXGEN+EX_R12(r13)
284	mfspr	r10,SPRG_SCRATCH0
285	ld	r11,PACA_EXSLB+EX_R9(r13)
286	ld	r12,PACA_EXSLB+EX_R3(r13)
287	std	r10,PACA_EXGEN+EX_R13(r13)
288	std	r11,PACA_EXGEN+EX_R9(r13)
289	std	r12,PACA_EXGEN+EX_R3(r13)
290	clrrdi	r12,r13,32
291	mfmsr	r10
292	mfspr	r11,SRR0			/* save SRR0 */
293	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
294	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
295	mtspr	SRR0,r12
296	mfspr	r12,SRR1			/* and SRR1 */
297	mtspr	SRR1,r10
298	rfid
299	b	.				/* prevent spec. execution */
300#endif /* __DISABLED__ */
301
302	.align	7
303	.globl	__end_interrupts
304__end_interrupts:
305
306/*
307 * Code from here down to __end_handlers is invoked from the
308 * exception prologs above.  Because the prologs assemble the
309 * addresses of these handlers using the LOAD_HANDLER macro,
310 * which uses an addi instruction, these handlers must be in
311 * the first 32k of the kernel image.
312 */
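/*
 * (LOAD_HANDLER, defined in asm/exception-64s.h, is essentially a single
 * "addi reg,reg,label-_stext", so the handler's offset from the kernel
 * base must fit in addi's signed 16-bit immediate, hence the 32k limit.)
 */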
313
314/*** Common interrupt handlers ***/
315
316	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
317
318	/*
319	 * Machine check is different because we use a different
320	 * save area: PACA_EXMC instead of PACA_EXGEN.
321	 */
322	.align	7
323	.globl machine_check_common
324machine_check_common:
325	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
326	FINISH_NAP
327	DISABLE_INTS
328	bl	.save_nvgprs
329	addi	r3,r1,STACK_FRAME_OVERHEAD
330	bl	.machine_check_exception
331	b	.ret_from_except
332
333	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
334	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
335	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
336	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
337	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
338	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
339	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
340#ifdef CONFIG_ALTIVEC
341	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
342#else
343	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
344#endif
345#ifdef CONFIG_CBE_RAS
346	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
347	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
348	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
349#endif /* CONFIG_CBE_RAS */
350
351	.align	7
352system_call_entry:
353	b	system_call_common
354
355/*
356 * Here we have detected that the kernel stack pointer is bad.
357 * R9 contains the saved CR, r13 points to the paca,
358 * r10 contains the (bad) kernel stack pointer,
359 * r11 and r12 contain the saved SRR0 and SRR1.
360 * We switch to using an emergency stack, save the registers there,
361 * and call kernel_bad_stack(), which panics.
362 */
363bad_stack:
364	ld	r1,PACAEMERGSP(r13)
365	subi	r1,r1,64+INT_FRAME_SIZE
366	std	r9,_CCR(r1)
367	std	r10,GPR1(r1)
368	std	r11,_NIP(r1)
369	std	r12,_MSR(r1)
370	mfspr	r11,SPRN_DAR
371	mfspr	r12,SPRN_DSISR
372	std	r11,_DAR(r1)
373	std	r12,_DSISR(r1)
374	mflr	r10
375	mfctr	r11
376	mfxer	r12
377	std	r10,_LINK(r1)
378	std	r11,_CTR(r1)
379	std	r12,_XER(r1)
380	SAVE_GPR(0,r1)
381	SAVE_GPR(2,r1)
382	SAVE_4GPRS(3,r1)
383	SAVE_2GPRS(7,r1)
384	SAVE_10GPRS(12,r1)
385	SAVE_10GPRS(22,r1)
386	lhz	r12,PACA_TRAP_SAVE(r13)
387	std	r12,_TRAP(r1)
388	addi	r11,r1,INT_FRAME_SIZE	/* set up the stack back-chain */
389	std	r11,0(r1)
390	li	r12,0
391	std	r12,0(r11)		/* and terminate it */
392	ld	r2,PACATOC(r13)
3931:	addi	r3,r1,STACK_FRAME_OVERHEAD
394	bl	.kernel_bad_stack
395	b	1b
396
397/*
398 * Here r13 points to the paca, r9 contains the saved CR,
399 * SRR0 and SRR1 are saved in r11 and r12,
400 * r9 - r13 are saved in paca->exgen.
401 */
402	.align	7
403	.globl data_access_common
404data_access_common:
405	mfspr	r10,SPRN_DAR
406	std	r10,PACA_EXGEN+EX_DAR(r13)
407	mfspr	r10,SPRN_DSISR
408	stw	r10,PACA_EXGEN+EX_DSISR(r13)
409	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
410	ld	r3,PACA_EXGEN+EX_DAR(r13)
411	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
412	li	r5,0x300
413	b	.do_hash_page	 	/* Try to handle as hpte fault */
414
415	.align	7
416	.globl instruction_access_common
417instruction_access_common:
418	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
419	ld	r3,_NIP(r1)
420	andis.	r4,r12,0x5820		/* fault bits from SRR1, used as a pseudo-DSISR */
421	li	r5,0x400
422	b	.do_hash_page		/* Try to handle as hpte fault */
423
424/*
425 * This is the common user-mode SLB miss handler, used when going to
426 * virtual mode for SLB misses; it is currently unused.
427 */
428#ifdef __DISABLED__
429	.align	7
430	.globl	slb_miss_user_common
431slb_miss_user_common:
432	mflr	r10
433	std	r3,PACA_EXGEN+EX_DAR(r13)
434	stw	r9,PACA_EXGEN+EX_CCR(r13)
435	std	r10,PACA_EXGEN+EX_LR(r13)
436	std	r11,PACA_EXGEN+EX_SRR0(r13)
437	bl	.slb_allocate_user
438
439	ld	r10,PACA_EXGEN+EX_LR(r13)
440	ld	r3,PACA_EXGEN+EX_R3(r13)
441	lwz	r9,PACA_EXGEN+EX_CCR(r13)
442	ld	r11,PACA_EXGEN+EX_SRR0(r13)
443	mtlr	r10
444	beq-	slb_miss_fault
445
446	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
447	beq-	unrecov_user_slb
448	mfmsr	r10
449
450.machine push
451.machine "power4"
452	mtcrf	0x80,r9
453.machine pop
454
455	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
456	mtmsrd	r10,1
457
458	mtspr	SRR0,r11
459	mtspr	SRR1,r12
460
461	ld	r9,PACA_EXGEN+EX_R9(r13)
462	ld	r10,PACA_EXGEN+EX_R10(r13)
463	ld	r11,PACA_EXGEN+EX_R11(r13)
464	ld	r12,PACA_EXGEN+EX_R12(r13)
465	ld	r13,PACA_EXGEN+EX_R13(r13)
466	rfid
467	b	.
468
469slb_miss_fault:
470	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
471	ld	r4,PACA_EXGEN+EX_DAR(r13)
472	li	r5,0
473	std	r4,_DAR(r1)
474	std	r5,_DSISR(r1)
475	b	handle_page_fault
476
477unrecov_user_slb:
478	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
479	DISABLE_INTS
480	bl	.save_nvgprs
4811:	addi	r3,r1,STACK_FRAME_OVERHEAD
482	bl	.unrecoverable_exception
483	b	1b
484
485#endif /* __DISABLED__ */
486
487
488/*
489 * r13 points to the PACA, r9 contains the saved CR,
490 * r12 contains the saved SRR1, SRR0 is still ready for return
491 * r3 has the faulting address
492 * r9 - r13 are saved in paca->exslb.
493 * r3 is saved in paca->slb_r3
494 * We assume we aren't going to take any exceptions during this procedure.
495 */
496_GLOBAL(slb_miss_realmode)
497	mflr	r10
498#ifdef CONFIG_RELOCATABLE
499	mtctr	r11
500#endif
501
502	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
503	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
504
505	bl	.slb_allocate_realmode
506
507	/* All done -- return from exception. */
508
509	ld	r10,PACA_EXSLB+EX_LR(r13)
510	ld	r3,PACA_EXSLB+EX_R3(r13)
511	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
512#ifdef CONFIG_PPC_ISERIES
513BEGIN_FW_FTR_SECTION
514	ld	r11,PACALPPACAPTR(r13)
515	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
516END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
517#endif /* CONFIG_PPC_ISERIES */
518
519	mtlr	r10
520
521	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
522	beq-	2f
523
524.machine	push
525.machine	"power4"
526	mtcrf	0x80,r9
527	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
528.machine	pop
529
530#ifdef CONFIG_PPC_ISERIES
531BEGIN_FW_FTR_SECTION
532	mtspr	SPRN_SRR0,r11
533	mtspr	SPRN_SRR1,r12
534END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
535#endif /* CONFIG_PPC_ISERIES */
536	ld	r9,PACA_EXSLB+EX_R9(r13)
537	ld	r10,PACA_EXSLB+EX_R10(r13)
538	ld	r11,PACA_EXSLB+EX_R11(r13)
539	ld	r12,PACA_EXSLB+EX_R12(r13)
540	ld	r13,PACA_EXSLB+EX_R13(r13)
541	rfid
542	b	.	/* prevent speculative execution */
543
5442:
545#ifdef CONFIG_PPC_ISERIES
546BEGIN_FW_FTR_SECTION
547	b	unrecov_slb
548END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
549#endif /* CONFIG_PPC_ISERIES */
550	mfspr	r11,SPRN_SRR0
551	ld	r10,PACAKBASE(r13)
552	LOAD_HANDLER(r10,unrecov_slb)
553	mtspr	SPRN_SRR0,r10
554	ld	r10,PACAKMSR(r13)
555	mtspr	SPRN_SRR1,r10
556	rfid
557	b	.
558
559unrecov_slb:
560	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
561	DISABLE_INTS
562	bl	.save_nvgprs
5631:	addi	r3,r1,STACK_FRAME_OVERHEAD
564	bl	.unrecoverable_exception
565	b	1b
566
567	.align	7
568	.globl hardware_interrupt_common
569	.globl hardware_interrupt_entry
570hardware_interrupt_common:
571	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
572	FINISH_NAP
573hardware_interrupt_entry:
574	DISABLE_INTS
575BEGIN_FTR_SECTION
576	bl	.ppc64_runlatch_on
577END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
578	addi	r3,r1,STACK_FRAME_OVERHEAD
579	bl	.do_IRQ
580	b	.ret_from_except_lite
581
582#ifdef CONFIG_PPC_970_NAP
583power4_fixup_nap:
584	andc	r9,r9,r10
585	std	r9,TI_LOCAL_FLAGS(r11)
586	ld	r10,_LINK(r1)		/* make idle task do the */
587	std	r10,_NIP(r1)		/* equivalent of a blr */
588	blr
589#endif
590
591	.align	7
592	.globl alignment_common
593alignment_common:
594	mfspr	r10,SPRN_DAR
595	std	r10,PACA_EXGEN+EX_DAR(r13)
596	mfspr	r10,SPRN_DSISR
597	stw	r10,PACA_EXGEN+EX_DSISR(r13)
598	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
599	ld	r3,PACA_EXGEN+EX_DAR(r13)
600	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
601	std	r3,_DAR(r1)
602	std	r4,_DSISR(r1)
603	bl	.save_nvgprs
604	addi	r3,r1,STACK_FRAME_OVERHEAD
605	ENABLE_INTS
606	bl	.alignment_exception
607	b	.ret_from_except
608
609	.align	7
610	.globl program_check_common
611program_check_common:
612	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
613	bl	.save_nvgprs
614	addi	r3,r1,STACK_FRAME_OVERHEAD
615	ENABLE_INTS
616	bl	.program_check_exception
617	b	.ret_from_except
618
619	.align	7
620	.globl fp_unavailable_common
621fp_unavailable_common:
622	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
623	bne	1f			/* if from user, just load it up */
624	bl	.save_nvgprs
625	addi	r3,r1,STACK_FRAME_OVERHEAD
626	ENABLE_INTS
627	bl	.kernel_fp_unavailable_exception
628	BUG_OPCODE
6291:	bl	.load_up_fpu
630	b	fast_exception_return
631
632	.align	7
633	.globl altivec_unavailable_common
634altivec_unavailable_common:
635	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
636#ifdef CONFIG_ALTIVEC
637BEGIN_FTR_SECTION
638	beq	1f
639	bl	.load_up_altivec
640	b	fast_exception_return
6411:
642END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
643#endif
644	bl	.save_nvgprs
645	addi	r3,r1,STACK_FRAME_OVERHEAD
646	ENABLE_INTS
647	bl	.altivec_unavailable_exception
648	b	.ret_from_except
649
650	.align	7
651	.globl vsx_unavailable_common
652vsx_unavailable_common:
653	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
654#ifdef CONFIG_VSX
655BEGIN_FTR_SECTION
656	bne	.load_up_vsx
6571:
658END_FTR_SECTION_IFSET(CPU_FTR_VSX)
659#endif
660	bl	.save_nvgprs
661	addi	r3,r1,STACK_FRAME_OVERHEAD
662	ENABLE_INTS
663	bl	.vsx_unavailable_exception
664	b	.ret_from_except
665
666	.align	7
667	.globl	__end_handlers
668__end_handlers:
669
670/*
671 * Return from an exception with minimal checks.
672 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
673 * If interrupts have been enabled, or anything has been
674 * done that might have changed the scheduling status of
675 * any task or sent any task a signal, you should use
676 * ret_from_except or ret_from_except_lite instead of this.
677 */
678fast_exc_return_irq:			/* restores irq state too */
679	ld	r3,SOFTE(r1)
680	TRACE_AND_RESTORE_IRQ(r3);
681	ld	r12,_MSR(r1)
682	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
683	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
684	b	1f
685
686	.globl	fast_exception_return
687fast_exception_return:
688	ld	r12,_MSR(r1)
6891:	ld	r11,_NIP(r1)
690	andi.	r3,r12,MSR_RI		/* check if RI is set */
691	beq-	unrecov_fer
692
693#ifdef CONFIG_VIRT_CPU_ACCOUNTING
694	andi.	r3,r12,MSR_PR
695	beq	2f
696	ACCOUNT_CPU_USER_EXIT(r3, r4)
6972:
698#endif
699
700	ld	r3,_CCR(r1)
701	ld	r4,_LINK(r1)
702	ld	r5,_CTR(r1)
703	ld	r6,_XER(r1)
704	mtcr	r3
705	mtlr	r4
706	mtctr	r5
707	mtxer	r6
708	REST_GPR(0, r1)
709	REST_8GPRS(2, r1)
710
711	mfmsr	r10
712	rldicl	r10,r10,48,1		/* clear EE */
713	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
714	mtmsrd	r10,1
715
716	mtspr	SPRN_SRR1,r12
717	mtspr	SPRN_SRR0,r11
718	REST_4GPRS(10, r1)
719	ld	r1,GPR1(r1)
720	rfid
721	b	.	/* prevent speculative execution */
722
723unrecov_fer:
724	bl	.save_nvgprs
7251:	addi	r3,r1,STACK_FRAME_OVERHEAD
726	bl	.unrecoverable_exception
727	b	1b
728
729
730/*
731 * Hash table stuff
732 */
733	.align	7
734_STATIC(do_hash_page)
735	std	r3,_DAR(r1)
736	std	r4,_DSISR(r1)
737
738	andis.	r0,r4,0xa410		/* weird error? */
739	bne-	handle_page_fault	/* if not, try to insert a HPTE */
740	andis.  r0,r4,DSISR_DABRMATCH@h
741	bne-    handle_dabr_fault
742
743BEGIN_FTR_SECTION
744	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
745	bne-	do_ste_alloc		/* If so handle it */
746END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
747
748	clrrdi	r11,r1,THREAD_SHIFT
749	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
750	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
751	bne	77f			/* then don't call hash_page now */
752
753	/*
754	 * On iSeries, we soft-disable interrupts here, then
755	 * hard-enable interrupts so that the hash_page code can spin on
756	 * the hash_table_lock without problems on a shared processor.
757	 */
758	DISABLE_INTS
759
760	/*
761	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
762	 * and will clobber volatile registers when irq tracing is enabled
763	 * so we need to reload them. It may be possible to be smarter here
764	 * and move the irq tracing elsewhere but let's keep it simple for
765	 * now
766	 */
767#ifdef CONFIG_TRACE_IRQFLAGS
768	ld	r3,_DAR(r1)
769	ld	r4,_DSISR(r1)
770	ld	r5,_TRAP(r1)
771	ld	r12,_MSR(r1)
772	clrrdi	r5,r5,4
773#endif /* CONFIG_TRACE_IRQFLAGS */
774	/*
775	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
776	 * accessing a userspace segment (even from the kernel). We assume
777	 * kernel addresses always have the high bit set.
778	 */
779	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
780	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
781	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
782	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
783	ori	r4,r4,1			/* add _PAGE_PRESENT */
784	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
785
786	/*
787	 * r3 contains the faulting address
788	 * r4 contains the required access permissions
789	 * r5 contains the trap number
790	 *
791	 * at return r3 = 0 for success
792	 */
793	bl	.hash_page		/* build HPTE if possible */
794	cmpdi	r3,0			/* see if hash_page succeeded */
795
796BEGIN_FW_FTR_SECTION
797	/*
798	 * If we had interrupts soft-enabled at the point where the
799	 * DSI/ISI occurred, and an interrupt came in during hash_page,
800	 * handle it now.
801	 * We jump to ret_from_except_lite rather than fast_exception_return
802	 * because ret_from_except_lite will check for and handle pending
803	 * interrupts if necessary.
804	 */
805	beq	13f
806END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
807
808BEGIN_FW_FTR_SECTION
809	/*
810	 * Here we have interrupts hard-disabled, so it is sufficient
811	 * to restore paca->{soft,hard}_enable and get out.
812	 */
813	beq	fast_exc_return_irq	/* Return from exception on success */
814END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
815
816	/* For a hash failure, we don't bother re-enabling interrupts */
817	ble-	12f
818
819	/*
820	 * hash_page couldn't handle it, set soft interrupt enable back
821	 * to what it was before the trap.  Note that .raw_local_irq_restore
822	 * handles any interrupts pending at this point.
823	 */
824	ld	r3,SOFTE(r1)
825	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
826	bl	.raw_local_irq_restore
827	b	11f
828
829/* We have a data breakpoint exception - handle it */
830handle_dabr_fault:
831	bl	.save_nvgprs
832	ld      r4,_DAR(r1)
833	ld      r5,_DSISR(r1)
834	addi    r3,r1,STACK_FRAME_OVERHEAD
835	bl      .do_dabr
836	b       .ret_from_except_lite
837
838/* Here we have a page fault that hash_page can't handle. */
839handle_page_fault:
840	ENABLE_INTS
84111:	ld	r4,_DAR(r1)
842	ld	r5,_DSISR(r1)
843	addi	r3,r1,STACK_FRAME_OVERHEAD
844	bl	.do_page_fault
845	cmpdi	r3,0
846	beq+	13f
847	bl	.save_nvgprs
848	mr	r5,r3
849	addi	r3,r1,STACK_FRAME_OVERHEAD
850	lwz	r4,_DAR(r1)
851	bl	.bad_page_fault
852	b	.ret_from_except
853
85413:	b	.ret_from_except_lite
855
856/* We have a page fault that hash_page could handle but the hypervisor
857 * refused the PTE insertion.
858 */
85912:	bl	.save_nvgprs
860	mr	r5,r3
861	addi	r3,r1,STACK_FRAME_OVERHEAD
862	ld	r4,_DAR(r1)
863	bl	.low_hash_fault
864	b	.ret_from_except
865
866/*
867 * We come here as a result of a DSI at a point where we don't want
868 * to call hash_page, such as when we are accessing memory (possibly
869 * user memory) inside a PMU interrupt that occurred while interrupts
870 * were soft-disabled.  We want to invoke the exception handler for
871 * the access, or panic if there isn't a handler.
872 */
87377:	bl	.save_nvgprs
874	mr	r4,r3
875	addi	r3,r1,STACK_FRAME_OVERHEAD
876	li	r5,SIGSEGV
877	bl	.bad_page_fault
878	b	.ret_from_except
879
880	/* here we have a segment miss */
881do_ste_alloc:
882	bl	.ste_allocate		/* try to insert stab entry */
883	cmpdi	r3,0
884	bne-	handle_page_fault
885	b	fast_exception_return
886
887/*
888 * r13 points to the PACA, r9 contains the saved CR,
889 * r11 and r12 contain the saved SRR0 and SRR1.
890 * r9 - r13 are saved in paca->exslb.
891 * We assume we aren't going to take any exceptions during this procedure.
892 * We assume (DAR >> 60) == 0xc.
893 */
894	.align	7
895_GLOBAL(do_stab_bolted)
896	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
897	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
898
899	/* Hash to the primary group */
900	ld	r10,PACASTABVIRT(r13)
901	mfspr	r11,SPRN_DAR
902	srdi	r11,r11,28
903	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
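	/*
	 * (The table is one 4k page of 32 groups, each holding eight
	 * 16-byte STEs, so the group is selected by the low 5 bits of
	 * the ESID.)
	 */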
904
905	/* Calculate VSID */
906	/* This is a kernel address, so protovsid = ESID */
907	ASM_VSID_SCRAMBLE(r11, r9, 256M)
908	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
909
910	/* Search the primary group for a free entry */
9111:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
912	andi.	r11,r11,0x80
913	beq	2f
914	addi	r10,r10,16
915	andi.	r11,r10,0x70		/* reached the end of the group? */
916	bne	1b
917
918	/* Stick to searching only the primary group for now.		*/
919	/* At least for now, we use a very simple random castout scheme */
920	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
921	mftb	r11
922	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
923	ori	r11,r11,0x10
924
925	/* r10 currently points to an ste one past the group of interest */
926	/* make it point to the randomly selected entry			*/
927	subi	r10,r10,128
928	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
929
930	isync			/* mark the entry invalid		*/
931	ld	r11,0(r10)
932	rldicl	r11,r11,56,1	/* clear the valid bit */
933	rotldi	r11,r11,8
934	std	r11,0(r10)
935	sync
936
937	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
938	slbie	r11
939
9402:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
941	eieio
942
943	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
944	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
945	ori	r11,r11,0x90	/* Turn on valid and kp			*/
946	std	r11,0(r10)	/* Put new entry back into the stab	*/
947
948	sync
949
950	/* All done -- return from exception. */
951	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
952	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
953
954	andi.	r10,r12,MSR_RI
955	beq-	unrecov_slb
956
957	mtcrf	0x80,r9			/* restore CR */
958
959	mfmsr	r10
960	clrrdi	r10,r10,2
961	mtmsrd	r10,1
962
963	mtspr	SPRN_SRR0,r11
964	mtspr	SPRN_SRR1,r12
965	ld	r9,PACA_EXSLB+EX_R9(r13)
966	ld	r10,PACA_EXSLB+EX_R10(r13)
967	ld	r11,PACA_EXSLB+EX_R11(r13)
968	ld	r12,PACA_EXSLB+EX_R12(r13)
969	ld	r13,PACA_EXSLB+EX_R13(r13)
970	rfid
971	b	.	/* prevent speculative execution */
972
973/*
974 * Space for CPU0's segment table.
975 *
976 * On iSeries, the hypervisor must fill in at least one entry before
977 * we get control (with relocate on).  The address is given to the hv
978 * as a page number (see xLparMap below), so this must be at a
979 * fixed address (the linker can't compute (u64)&initial_stab >>
980 * PAGE_SHIFT).
981 */
982	. = STAB0_OFFSET	/* 0x6000 */
983	.globl initial_stab
984initial_stab:
985	.space	4096
986
987#ifdef CONFIG_PPC_PSERIES
988/*
989 * Data area reserved for FWNMI option.
990 * This address (0x7000) is fixed by the RPA.
991 */
992	. = 0x7000
993	.globl fwnmi_data_area
994fwnmi_data_area:
995#endif /* CONFIG_PPC_PSERIES */
996
997	/* iSeries does not use the FWNMI stuff, so it is safe to put
998	 * this here, even if we later allow kernels that will boot on
999	 * both pSeries and iSeries */
1000#ifdef CONFIG_PPC_ISERIES
1001        . = LPARMAP_PHYS
1002	.globl xLparMap
1003xLparMap:
1004	.quad	HvEsidsToMap		/* xNumberEsids */
1005	.quad	HvRangesToMap		/* xNumberRanges */
1006	.quad	STAB0_PAGE		/* xSegmentTableOffs */
1007	.zero	40			/* xRsvd */
1008	/* xEsids (HvEsidsToMap entries of 2 quads) */
1009	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
1010	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
1011	.quad	VMALLOC_START_ESID	/* xKernelEsid */
1012	.quad	VMALLOC_START_VSID	/* xKernelVsid */
1013	/* xRanges (HvRangesToMap entries of 3 quads) */
1014	.quad	HvPagesToMap		/* xPages */
1015	.quad	0			/* xOffset */
1016	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
1017
1018#endif /* CONFIG_PPC_ISERIES */
1019
1020#ifdef CONFIG_PPC_PSERIES
1021        . = 0x8000
1022#endif /* CONFIG_PPC_PSERIES */
1023