1#include <linux/threads.h>
2#include <asm/processor.h>
3#include <asm/page.h>
4#include <asm/cputable.h>
5#include <asm/thread_info.h>
6#include <asm/ppc_asm.h>
7#include <asm/asm-offsets.h>
8
9
/*
 * Structure for storing CPU registers on the save area.
 *
 * Byte offsets into swsusp_save_area.  Entries are one 32-bit word
 * unless noted: each BAT entry is an upper/lower pair (8 bytes) and
 * the timebase is 64 bits (TBU:TBL).
 */
#define SL_SP		0	/* stack pointer (r1) */
#define SL_PC		4	/* not referenced in this file */
#define SL_MSR		8	/* machine state register */
#define SL_SDR1		0xc	/* page-table (hash) base register */
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20	/* data BATs: upper word, then lower */
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60	/* 64-bit timebase (TBU at +0, TBL at +4) */
#define SL_R2		0x68	/* TOC pointer (r2) */
#define SL_CR		0x6c	/* condition register */
#define SL_LR		0x70	/* link register */
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)	/* +80 = 20 regs (r12..r31) * 4 bytes */
32
	.section .data
	.align	5

/*
 * CPU register save area, SL_SIZE bytes, laid out per the SL_*
 * offsets above.  Written by swsusp_arch_suspend and read back by
 * swsusp_arch_resume.  .align 5 => 32-byte (cache line) aligned.
 */
_GLOBAL(swsusp_save_area)
	.space	SL_SIZE
38
39
	.section .text
	.align	5

/*
 * int swsusp_arch_suspend(void)
 *
 * Save the CPU context that must survive hibernation into
 * swsusp_save_area: the ABI non-volatile registers (r1, r2, r12-r31,
 * CR, LR) plus MSR, SDR1, the 64-bit timebase, the four SPRGs and
 * all eight BAT pairs.  Then call swsusp_save() to snapshot memory;
 * its return value is still in r3 when we return to our caller.
 *
 * Clobbers r0, r3-r5, r11 (all ABI-volatile).
 */
_GLOBAL(swsusp_arch_suspend)

	/* r11 = &swsusp_save_area for the whole function */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	/* Save the callee-saved state: LR, CR, r1 (SP), r2, r12-r31 */
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it: re-read TBU and retry if
	 * it changed while we were reading TBL (carry crossed over) */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs (upper word, then lower, for each pair) */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

	/* Call the low level suspend stuff (we should probably have made
	 * a stackframe...
	 */
	bl	swsusp_save

	/* Restore LR from the save area (bl above clobbered it) and
	 * return with swsusp_save's result untouched in r3 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr
127
128
/* Resume code */
/*
 * int swsusp_arch_resume(void)
 *
 * Restore the hibernation image: with MSR:DR cleared, copy every page
 * on restore_pblist back to its original physical location, flush the
 * L1 cache, then reload SDR1, the SPRGs, the BATs, the MSR and the
 * timebase from swsusp_save_area and return 0 to the (now restored)
 * caller of swsusp_arch_suspend.
 *
 * Fixes vs. previous revision:
 *  - the "Flush all TLBs" loop used blt, which falls through after a
 *    single tlbie (the decremented counter is still positive); it must
 *    loop while the result is > 0 (bgt) to cover every 4KB step.
 *  - the BATs saved by swsusp_arch_suspend were never written back,
 *    despite the comment promising it; the restore sequence is added.
 */
_GLOBAL(swsusp_arch_resume)

	/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * for a while be unusable. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings,
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750's may have the same performance issue as
	 * the G5 in this case, we should investigate using moving
	 * BATs for these CPUs)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load ptr to the list of pages to copy (physical address) */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient */
1:
	tophys(r3,r10)
	li	r0,256			/* 256 iterations * 16 bytes = one 4KB page */
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz		r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache: touch 0x20000 lines of 32 bytes
	 * (4MB) to displace the old contents...
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* ...then flush those cache lines back to memory */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

	/* Restore the BATs, and SDR1.  Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same value to the BAT, so that should be fine,
	 * though a better solution will have to be found long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

	/* Write back the BAT pairs saved by swsusp_arch_suspend */
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4

	/* Zero the extra BATs on CPUs that have them (4..7) */
BEGIN_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)

	/* Flush all TLBs: step r4 down from 0x10000000 to 0 in 4KB
	 * increments, invalidating the TLB set each address indexes.
	 * Loop while the decremented counter is still > 0 (bgt); the
	 * previous blt fell through after a single tlbie.
	 */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB: zero TBL first so no carry into TBU can occur
	 * between the two writes */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer so it fires soon after resume */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	/* Return 0 (success) to swsusp_arch_suspend's caller */
	li	r3,0
	blr
294
/*
 * turn_on_mmu
 *
 * In:  r3 = MSR value to install
 *      LR = address to continue at once the new MSR is in effect
 * Copies LR into SRR0 and r3 into SRR1, then rfi: execution resumes
 * at the caller's return address with the new MSR (i.e. this "returns"
 * to its caller via rfi rather than blr).
 * Clobbers r4 and SRR0/SRR1.
 */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
302