/*
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 * 	Author: MontaVista Software, Inc.
 *         	frank_rowand@mvista.com or source@mvista.com
 * 	   	debbie_chu@mvista.com
 *
 *
 *    Module name: head_4xx.S
 *
 *    Description:
 *      Kernel execution entry point code.
 *
 *    This program is free software; you can redistribute it and/or
 *    modify it under the terms of the GNU General Public License
 *    as published by the Free Software Foundation; either version
 *    2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/ibm4xx.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=96m")
 *   r7 - End of kernel command line string
 *
 * This is all going to change RSN when we add bi_recs.......  -- Dan
 */
	.text
_GLOBAL(_stext)
_GLOBAL(_start)

	/* Save parameters we are passed.
	*/
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 16 Meg mapped into TLB entries, and the caches
 * ready to work.
 */
turn_on_mmu:
	lis	r0,MSR_KERNEL@h
	ori	r0,r0,MSR_KERNEL@l
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	rfi				/* enables MMU */
	b	.			/* prevent prefetch past rfi */

/*
 * This area is used for temporarily saving registers during the
 * critical exception prolog.
 */
	. = 0xc0
crit_save:
_GLOBAL(crit_r10)
	.space	4
_GLOBAL(crit_r11)
	.space	4

/*
 * Exception vector entry code. This code runs with address translation
 * turned off (i.e. using physical addresses). We assume SPRG3 has the
 * physical address of the current task thread_struct.
 * Note that we have to have decremented r1 before we write to any fields
 * of the exception frame, since a critical interrupt could occur at any
 * time, and it will write to the area immediately below the current r1.
 */
#define NORMAL_EXCEPTION_PROLOG						     \
	mtspr	SPRN_SPRG0,r10;		/* save two registers to work with */\
	mtspr	SPRN_SPRG1,r11;						     \
	mtspr	SPRN_SPRG2,r1;						     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	beq	1f;							     \
	mfspr	r1,SPRN_SPRG3;		/* if from user, start at top of   */\
	lwz	r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack   */\
	addi	r1,r1,THREAD_SIZE;					     \
1:	subi	r1,r1,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
	tophys(r11,r1);							     \
	stw	r10,_CCR(r11);          /* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mfspr	r10,SPRN_SPRG0;						     \
	stw	r10,GPR10(r11);						     \
	mfspr	r12,SPRN_SPRG1;						     \
	stw	r12,GPR11(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r10,SPRN_SPRG2;						     \
	mfspr	r12,SPRN_SRR0;						     \
	stw	r10,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR1;						     \
	stw	r10,0(r11);						     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)

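	/*
	 * Rough register state on exit from NORMAL_EXCEPTION_PROLOG (a
	 * summary of the macro above, analogous to the state comment
	 * after the critical prolog below):
	 * r1  - virtual pointer to the new exception frame
	 * r11 - physical pointer to the same frame
	 * r12 - saved SRR0 (the interrupted PC)
	 * r9  - saved SRR1 with MSR_WE cleared
	 * r10 - trashed (last held the interrupted r1)
	 * CR, LR, the old r1, r0 and r3-r12 are saved in the frame.
	 */
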
/*
 * Exception prolog for critical exceptions.  This is a little different
 * from the normal exception prolog above since a critical exception
 * can potentially occur at any point during normal exception processing.
 * Thus we cannot use the same SPRG registers as the normal prolog above.
 * Instead we use a couple of words of memory at low physical addresses.
 * This is OK since we don't support SMP on these processors.
 */
#define CRITICAL_EXCEPTION_PROLOG					     \
	stw	r10,crit_r10@l(0);	/* save two registers to work with */\
	stw	r11,crit_r11@l(0);					     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_SRR3;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	lis	r11,critical_stack_top@h;				     \
	ori	r11,r11,critical_stack_top@l;				     \
	beq	1f;							     \
	/* COMING FROM USER MODE */					     \
	mfspr	r11,SPRN_SPRG3;		/* if from user, start at top of   */\
	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
	addi	r11,r11,THREAD_SIZE;					     \
1:	subi	r11,r11,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
	tophys(r11,r11);						     \
	stw	r10,_CCR(r11);          /* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r12,SPRN_DEAR;		/* save DEAR and ESR in the frame  */\
	stw	r12,_DEAR(r11);		/* since they may have had stuff   */\
	mfspr	r9,SPRN_ESR;		/* in them at the point where the  */\
	stw	r9,_ESR(r11);		/* exception was taken		   */\
	mfspr	r12,SPRN_SRR2;						     \
	stw	r1,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR3;						     \
	stw	r1,0(r11);						     \
	tovirt(r1,r11);							     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)

	/*
	 * State at this point:
	 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
	 * r10 saved in crit_r10 and in stack frame, trashed
	 * r11 saved in crit_r11 and in stack frame,
	 *	now phys stack/exception frame pointer
	 * r12 saved in stack frame, now saved SRR2
	 * CR saved in stack frame, CR0.EQ = !SRR3.PR
	 * LR, DEAR, ESR in stack frame
	 * r1 saved in stack frame, now virt stack/excframe pointer
	 * r0, r3-r8 saved in stack frame
	 */

/*
 * Exception vectors.
 */
#define	START_EXCEPTION(n, label)					     \
	. = n;								     \
label:

#define EXCEPTION(n, label, hdlr, xfer)				\
	START_EXCEPTION(n, label);				\
	NORMAL_EXCEPTION_PROLOG;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	xfer(n, hdlr)

#define CRITICAL_EXCEPTION(n, label, hdlr)			\
	START_EXCEPTION(n, label);				\
	CRITICAL_EXCEPTION_PROLOG;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  NOCOPY, crit_transfer_to_handler,	\
			  ret_from_crit_exc)

#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	lis	r10,msr@h;					\
	ori	r10,r10,msr@l;					\
	copyee(r10, r9);					\
	bl	tfer;		 				\
	.long	hdlr;						\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
			  ret_from_except)

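/* A rough summary of the EXC_XFER_* variants above: COPY_EE copies the
 * MSR_EE bit from the saved SRR1 (in r9) into the MSR the handler will
 * run with, so the _EE variants preserve the interrupted context's
 * external-interrupt enable state, while the others run with MSR_KERNEL
 * unchanged.  The _LITE variants go through the lighter
 * transfer_to_handler/ret_from_except path and mark this by setting the
 * low bit of the trap number (n+1).
 */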

/*
 * 0x0100 - Critical Interrupt Exception
 */
	CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)

/*
 * 0x0200 - Machine Check Exception
 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)

/*
 * 0x0300 - Data Storage Exception
 * This happens for just a few reasons.  U0 set (but we don't do that),
 * or zone protection fault (user violation, write to protected page).
 * If this is just an update of modified status, we do that quickly
 * and exit.  Otherwise, we call heavyweight functions to do the work.
 */
	START_EXCEPTION(0x0300,	DataStorage)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif

	/* First, check if it was a zone fault (which means a user
	* tried to access a kernel or read-protected page - always
	* a SEGV).  All other faults here must be stores, so no
	* need to check ESR_DST as well. */
	mfspr	r10, SPRN_ESR
	andis.	r10, r10, ESR_DIZ@h
	bne	2f

	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r11, 0(r11)		/* Get L1 entry */
	rlwinm.	r12, r11, 0, 0, 19	/* Extract L2 (pte) base address */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */

	andi.	r9, r11, _PAGE_RW	/* Is it writeable? */
	beq	2f			/* Bail if not */

	/* Update 'changed'.
	*/
	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	stw	r11, 0(r12)		/* Update Linux page table */

	/* Most of the Linux PTE is ready to load into the TLB LO.
	 * We set ZSEL, where only the LS-bit determines user access.
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * If shared is set, we cause a zero PID->TID load.
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
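	/* (The 0x0ce2 mask clears the software-only bits - the bottom two
	 * RPN bits, the top three bits of the zone field, and M - as
	 * described at finish_tlb_load below.)
	 */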
	li	r12, 0x0ce2
	andc	r11, r11, r12		/* Make sure 20, 21 are zero */

	/* find the TLB index that caused the fault.  It has to be here.
	*/
	tlbsx	r9, 0, r10

	tlbwe	r11, r9, TLB_DATA		/* Load TLB LO */

	/* Done...restore registers and get out of here.
	*/
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	PPC405_ERR77_SYNC
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	DataAccess

/*
 * 0x0400 - Instruction Storage Exception
 * This is caused by a fetch from non-execute or guarded pages.
 */
	START_EXCEPTION(0x0400, InstructionAccess)
	NORMAL_EXCEPTION_PROLOG
	mr	r4,r12			/* Pass SRR0 as arg2 */
	li	r5,0			/* Pass zero as arg3 */
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* 0x0500 - External Interrupt Exception */
	EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* 0x0600 - Alignment Exception */
	START_EXCEPTION(0x0600, Alignment)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR and save it */
	stw	r4,_DEAR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* 0x0700 - Program Exception */
	START_EXCEPTION(0x0700, ProgramCheck)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r4,SPRN_ESR		/* Grab the ESR and save it */
	stw	r4,_ESR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x700, program_check_exception)

	EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)

/* 0x0C00 - System Call Exception */
	START_EXCEPTION(0x0C00,	SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)

/* 0x1000 - Programmable Interval Timer (PIT) Exception */
	START_EXCEPTION(0x1000, Decrementer)
	NORMAL_EXCEPTION_PROLOG
	lis	r0,TSR_PIS@h
	mtspr	SPRN_TSR,r0		/* Clear the PIT exception */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x1000, timer_interrupt)


/* 0x1100 - Data TLB Miss Exception
 * As the name implies, translation is not in the MMU, so search the
 * page tables and fix it.  The only purpose of this function is to
 * load TLB entries from the page table if they exist.
 */
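/* A rough sketch of the two-level walk done below: the first rlwimi
 * inserts the top 10 bits of the faulting address (times 4) into the
 * page-directory base to form the L1 (pgd/pmd) entry address; the
 * second rlwimi inserts the middle 10 bits (times 4) into the PTE-page
 * address taken from that entry to form the L2 (pte) entry address.
 */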
	START_EXCEPTION(0x1100,	DTLBMiss)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r12, 0(r11)		/* Get L1 entry */
	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */
	andi.	r9, r11, _PAGE_PRESENT
	beq	5f

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 0(r12)

	/* Create TLB tag.  This is the faulting address plus a static
	 * set of bits.  These are size, valid, E, U0.
	*/
	li	r12, 0x00c0
	rlwimi	r10, r12, 0, 20, 31

	b	finish_tlb_load

2:	/* Check for possible large-page pmd entry */
	rlwinm.	r9, r12, 2, 22, 24
	beq	5f

	/* Create TLB tag.  This is the faulting address, plus a static
	 * set of bits (valid, E, U0) plus the size from the PMD.
	 */
	ori	r9, r9, 0x40
	rlwimi	r10, r9, 0, 20, 31
	mr	r11, r12

	b	finish_tlb_load

5:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	DataAccess

/* 0x1200 - Instruction TLB Miss Exception
 * Nearly the same as above, except we get our information from different
 * registers and bailout to a different point.
 */
	START_EXCEPTION(0x1200,	ITLBMiss)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r12, 0(r11)		/* Get L1 entry */
	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */
	andi.	r9, r11, _PAGE_PRESENT
	beq	5f

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 0(r12)

	/* Create TLB tag.  This is the faulting address plus a static
	 * set of bits.  These are size, valid, E, U0.
	*/
	li	r12, 0x00c0
	rlwimi	r10, r12, 0, 20, 31

	b	finish_tlb_load

2:	/* Check for possible large-page pmd entry */
	rlwinm.	r9, r12, 2, 22, 24
	beq	5f

	/* Create TLB tag.  This is the faulting address, plus a static
	 * set of bits (valid, E, U0) plus the size from the PMD.
	 */
	ori	r9, r9, 0x40
	rlwimi	r10, r9, 0, 20, 31
	mr	r11, r12

	b	finish_tlb_load

5:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	InstructionAccess

	EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
#ifdef CONFIG_IBM405_ERR51
	/* 405GP errata 51 */
	START_EXCEPTION(0x1700, Trap_17)
	b DTLBMiss
#else
	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
#endif
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)

/* Check for a single step debug exception while in an exception
 * handler before state has been saved.  This is to catch the case
 * where an instruction that we are trying to single step causes
 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
 * the exception handler generates a single step debug exception.
 *
 * If we get a debug trap on the first instruction of an exception handler,
 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
 * The exception handler was handling a non-critical interrupt, so it will
 * save (and later restore) the MSR via SPRN_SRR1, which will still have
 * the MSR_DE bit set.
 */
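/* In short (a summary of the checks below): if DBSR[IC] is set and the
 * interrupted context was kernel code running with translation off
 * (MSR_IR and MSR_PR both clear), or the faulting address lies within
 * the exception vectors (at or below 0x2100), the event is squashed:
 * DE is cleared in the saved SRR3, DBSR[IC] is cleared, and we return
 * with rfci.  Otherwise it is handled as a normal critical exception.
 */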
	/* 0x2000 - Debug Exception */
	START_EXCEPTION(0x2000, DebugTrap)
	CRITICAL_EXCEPTION_PROLOG

	/*
	 * If this is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically).  We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the SRR3 value and clearing the debug status.
	 */
	mfspr	r10,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r10,r10,DBSR_IC@h
	beq+	2f

	andi.	r10,r9,MSR_IR|MSR_PR	/* check supervisor + MMU off */
	beq	1f			/* branch and fix it up */

	mfspr   r10,SPRN_SRR2		/* Faulting instruction address */
	cmplwi  r10,0x2100
	bgt+    2f			/* address above exception vectors */

	/* here it looks like we got an inappropriate debug exception. */
1:	rlwinm	r9,r9,0,~MSR_DE		/* clear DE in the SRR3 value */
	lis	r10,DBSR_IC@h		/* clear the IC event */
	mtspr	SPRN_DBSR,r10
	/* restore state and get out */
	lwz	r10,_CCR(r11)
	lwz	r0,GPR0(r11)
	lwz	r1,GPR1(r11)
	mtcrf	0x80,r10
	mtspr	SPRN_SRR2,r12
	mtspr	SPRN_SRR3,r9
	lwz	r9,GPR9(r11)
	lwz	r12,GPR12(r11)
	lwz	r10,crit_r10@l(0)
	lwz	r11,crit_r11@l(0)
	PPC405_ERR77_SYNC
	rfci
	b	.

	/* continue normal handling for a critical exception... */
2:	mfspr	r4,SPRN_DBSR
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(DebugException, 0x2002, \
		(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
		NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)

/*
 * The other Data TLB exceptions bail out to this point
 * if they can't resolve the lightweight TLB fault.
 */
DataAccess:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	EXC_XFER_EE_LITE(0x300, handle_page_fault)

/* Other PowerPC processors, namely those derived from the 6xx series,
 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
 * However, for the 4xx-series processors these are neither defined nor
 * reserved.
 */

	/* Damn, I came up one instruction too many to fit into the
	 * exception space :-).  Both the instruction and data TLB
	 * miss get to this point to load the TLB.
	 * 	r10 - TLB_TAG value
	 * 	r11 - Linux PTE
	 *	r12, r9 - available to use
	 *	PID - loaded with proper value when we get here
	 *	Upon exit, we reload everything and RFI.
	 * Actually, it will fit now, but oh well.....a common place
	 * to load the TLB.
	 */
tlb_4xx_index:
	.long	0
finish_tlb_load:
	/* load the next available TLB index.
	*/
	lwz	r9, tlb_4xx_index@l(0)
	addi	r9, r9, 1
	andi.	r9, r9, (PPC4XX_TLB_SIZE-1)
	stw	r9, tlb_4xx_index@l(0)
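	/* (Victim selection is simple round-robin: tlb_4xx_index just
	 * cycles through all PPC4XX_TLB_SIZE entries of the unified TLB,
	 * presumably 64 on the 405-class parts.)
	 */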

6:
	/*
	 * Clear out the software-only bits in the PTE to generate the
	 * TLB_DATA value.  These are the bottom 2 bits of the RPN, the
	 * top 3 bits of the zone field, and M.
	 */
	li	r12, 0x0ce2
	andc	r11, r11, r12

	tlbwe	r11, r9, TLB_DATA		/* Load TLB LO */
	tlbwe	r10, r9, TLB_TAG		/* Load TLB HI */

	/* Done...restore registers and get out of here.
	*/
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	PPC405_ERR77_SYNC
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

/* extern void giveup_fpu(struct task_struct *prev)
 *
 * The PowerPC 4xx family of processors does not have an FPU, so this just
 * returns.
 */
_GLOBAL(giveup_fpu)
	blr

/* This is where the main kernel code starts.
 */
start_here:

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

/* Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 4xx, all we have to do is invalidate the TLB to clear
 * the old 16M byte TLB mappings.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	lis	r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
	ori	r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
	b	.		/* prevent prefetch past rfi */

/* Load up the kernel context */
2:
	sync			/* Flush to memory before changing TLB */
	tlbia
	isync			/* Flush shadow TLBs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	*/
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	lis	r4,MSR_KERNEL@h
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */
	b	.		/* prevent prefetch past rfi */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 16 MBytes of memory 1:1
 * virtual to physical and more importantly sets the cache mode.
 */
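/* Roughly, the code below pins one 16 MB TLB entry mapping KERNELBASE
 * to its physical address (normally 0), marked writable and executable,
 * so the kernel can run translated before MMU_init() builds the real
 * mappings; it then points EVPR at the (physical) exception vector base.
 */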
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
	isync

	/* We should still be executing code at physical address 0x0000xxxx
	 * at this point. However, start_here is at virtual address
	 * 0xC000xxxx. So, set up a TLB mapping to cover this once
	 * translation is enabled.
	 */

	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
	ori	r3,r3,KERNELBASE@l
	tophys(r4,r3)			/* Load the kernel physical address */

	iccci	r0,r3			/* Invalidate the i-cache before use */

	/* Load the kernel PID.
	*/
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Configure and load one 16 MB entry into TLB slot 63.
	 * In case we are pinning TLBs, slots 62 and 63 are reserved by the
	 * other TLB functions.  If not reserving, then it doesn't
	 * matter where the entry is loaded.
	 */
	clrrwi	r4,r4,10		/* Mask off the real page number */
	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */

	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))

	li	r0,63			/* TLB slot 63 */

	tlbwe	r4,r0,TLB_DATA		/* Load the data portion of the entry */
	tlbwe	r3,r0,TLB_TAG		/* Load the tag portion of the entry */

#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)

	/* Load a TLB entry for the UART, so that ppc4xx_progress() can use
	 * the UARTs nice and early.  We use a 4k real==virtual mapping. */

	lis	r3,SERIAL_DEBUG_IO_BASE@h
	ori	r3,r3,SERIAL_DEBUG_IO_BASE@l
	mr	r4,r3
	clrrwi	r4,r4,12
	ori	r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)

	clrrwi	r3,r3,12
	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))

	li	r0,0			/* TLB slot 0 */
	tlbwe	r4,r0,TLB_DATA
	tlbwe	r3,r0,TLB_TAG
#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */

	isync

	/* Establish the exception vector base
	*/
	lis	r4,KERNELBASE@h		/* EVPR only uses the high 16-bits */
	tophys(r0,r4)			/* Use the physical address */
	mtspr	SPRN_EVPR,r0

	blr

_GLOBAL(abort)
	mfspr	r13,SPRN_DBCR0
	oris	r13,r13,DBCR0_RST_SYSTEM@h
	mtspr	SPRN_DBCR0,r13

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif
	sync
	mtspr	SPRN_PID,r3
	isync				/* Need an isync to flush shadow */
					/* TLBs after changing PID */
	blr

/* We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096


/* Stack for handling critical exceptions from kernel mode */
	.section .bss
	.align	12
exception_stack_bottom:
	.space	4096
critical_stack_top:
	.globl	exception_stack_top
exception_stack_top:

/* This space gets a copy of optional info passed to us by the bootstrap
 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8