/*
 * arch/powerpc/kernel/swsusp_32.S
 * From Linux 2.6.36 as shipped in the NETGEAR R7000 V1.0.7.12_1.2.5
 * GPL source drop (components/opensource/linux/linux-2.6.36).
 *
 * Low-level 32-bit PowerPC support for software suspend ("swsusp"):
 * saves/restores CPU register context and copies the hibernation
 * image page list back into place on resume.
 */
1#include <linux/threads.h>
2#include <asm/processor.h>
3#include <asm/page.h>
4#include <asm/cputable.h>
5#include <asm/thread_info.h>
6#include <asm/ppc_asm.h>
7#include <asm/asm-offsets.h>
8#include <asm/mmu.h>
9
10/*
11 * Structure for storing CPU registers on the save area.
12 */
13#define SL_SP		0
14#define SL_PC		4
15#define SL_MSR		8
16#define SL_SDR1		0xc
17#define SL_SPRG0	0x10	/* 4 sprg's */
18#define SL_DBAT0	0x20
19#define SL_IBAT0	0x28
20#define SL_DBAT1	0x30
21#define SL_IBAT1	0x38
22#define SL_DBAT2	0x40
23#define SL_IBAT2	0x48
24#define SL_DBAT3	0x50
25#define SL_IBAT3	0x58
26#define SL_TB		0x60
27#define SL_R2		0x68
28#define SL_CR		0x6c
29#define SL_LR		0x70
30#define SL_R12		0x74	/* r12 to r31 */
31#define SL_SIZE		(SL_R12 + 80)
32
33	.section .data
34	.align	5
35
36_GLOBAL(swsusp_save_area)
37	.space	SL_SIZE
38
39
40	.section .text
41	.align	5
42
43_GLOBAL(swsusp_arch_suspend)
44
45	lis	r11,swsusp_save_area@h
46	ori	r11,r11,swsusp_save_area@l
47
48	mflr	r0
49	stw	r0,SL_LR(r11)
50	mfcr	r0
51	stw	r0,SL_CR(r11)
52	stw	r1,SL_SP(r11)
53	stw	r2,SL_R2(r11)
54	stmw	r12,SL_R12(r11)
55
56	/* Save MSR & SDR1 */
57	mfmsr	r4
58	stw	r4,SL_MSR(r11)
59	mfsdr1	r4
60	stw	r4,SL_SDR1(r11)
61
62	/* Get a stable timebase and save it */
631:	mftbu	r4
64	stw	r4,SL_TB(r11)
65	mftb	r5
66	stw	r5,SL_TB+4(r11)
67	mftbu	r3
68	cmpw	r3,r4
69	bne	1b
70
71	/* Save SPRGs */
72	mfsprg	r4,0
73	stw	r4,SL_SPRG0(r11)
74	mfsprg	r4,1
75	stw	r4,SL_SPRG0+4(r11)
76	mfsprg	r4,2
77	stw	r4,SL_SPRG0+8(r11)
78	mfsprg	r4,3
79	stw	r4,SL_SPRG0+12(r11)
80
81	/* Save BATs */
82	mfdbatu	r4,0
83	stw	r4,SL_DBAT0(r11)
84	mfdbatl	r4,0
85	stw	r4,SL_DBAT0+4(r11)
86	mfdbatu	r4,1
87	stw	r4,SL_DBAT1(r11)
88	mfdbatl	r4,1
89	stw	r4,SL_DBAT1+4(r11)
90	mfdbatu	r4,2
91	stw	r4,SL_DBAT2(r11)
92	mfdbatl	r4,2
93	stw	r4,SL_DBAT2+4(r11)
94	mfdbatu	r4,3
95	stw	r4,SL_DBAT3(r11)
96	mfdbatl	r4,3
97	stw	r4,SL_DBAT3+4(r11)
98	mfibatu	r4,0
99	stw	r4,SL_IBAT0(r11)
100	mfibatl	r4,0
101	stw	r4,SL_IBAT0+4(r11)
102	mfibatu	r4,1
103	stw	r4,SL_IBAT1(r11)
104	mfibatl	r4,1
105	stw	r4,SL_IBAT1+4(r11)
106	mfibatu	r4,2
107	stw	r4,SL_IBAT2(r11)
108	mfibatl	r4,2
109	stw	r4,SL_IBAT2+4(r11)
110	mfibatu	r4,3
111	stw	r4,SL_IBAT3(r11)
112	mfibatl	r4,3
113	stw	r4,SL_IBAT3+4(r11)
114
115	/* Call the low level suspend stuff (we should probably have made
116	 * a stackframe...
117	 */
118	bl	swsusp_save
119
120	/* Restore LR from the save area */
121	lis	r11,swsusp_save_area@h
122	ori	r11,r11,swsusp_save_area@l
123	lwz	r0,SL_LR(r11)
124	mtlr	r0
125
126	blr
127
128
129/* Resume code */
130_GLOBAL(swsusp_arch_resume)
131
132#ifdef CONFIG_ALTIVEC
133	/* Stop pending alitvec streams and memory accesses */
134BEGIN_FTR_SECTION
135	DSSALL
136END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
137#endif
138 	sync
139
140	/* Disable MSR:DR to make sure we don't take a TLB or
141	 * hash miss during the copy, as our hash table will
142	 * for a while be unuseable. For .text, we assume we are
143	 * covered by a BAT. This works only for non-G5 at this
144	 * point. G5 will need a better approach, possibly using
145	 * a small temporary hash table filled with large mappings,
146	 * disabling the MMU completely isn't a good option for
147	 * performance reasons.
148	 * (Note that 750's may have the same performance issue as
149	 * the G5 in this case, we should investigate using moving
150	 * BATs for these CPUs)
151	 */
152	mfmsr	r0
153	sync
154	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
155	mtmsr	r0
156	sync
157	isync
158
159	/* Load ptr the list of pages to copy in r3 */
160	lis	r11,(restore_pblist - KERNELBASE)@h
161	ori	r11,r11,restore_pblist@l
162	lwz	r10,0(r11)
163
164	/* Copy the pages. This is a very basic implementation, to
165	 * be replaced by something more cache efficient */
1661:
167	tophys(r3,r10)
168	li	r0,256
169	mtctr	r0
170	lwz	r11,pbe_address(r3)	/* source */
171	tophys(r5,r11)
172	lwz	r10,pbe_orig_address(r3)	/* destination */
173	tophys(r6,r10)
1742:
175	lwz	r8,0(r5)
176	lwz	r9,4(r5)
177	lwz	r10,8(r5)
178	lwz	r11,12(r5)
179	addi	r5,r5,16
180	stw	r8,0(r6)
181	stw	r9,4(r6)
182	stw	r10,8(r6)
183	stw	r11,12(r6)
184	addi	r6,r6,16
185	bdnz	2b
186	lwz		r10,pbe_next(r3)
187	cmpwi	0,r10,0
188	bne	1b
189
190	/* Do a very simple cache flush/inval of the L1 to ensure
191	 * coherency of the icache
192	 */
193	lis	r3,0x0002
194	mtctr	r3
195	li	r3, 0
1961:
197	lwz	r0,0(r3)
198	addi	r3,r3,0x0020
199	bdnz	1b
200	isync
201	sync
202
203	/* Now flush those cache lines */
204	lis	r3,0x0002
205	mtctr	r3
206	li	r3, 0
2071:
208	dcbf	0,r3
209	addi	r3,r3,0x0020
210	bdnz	1b
211	sync
212
213	/* Ok, we are now running with the kernel data of the old
214	 * kernel fully restored. We can get to the save area
215	 * easily now. As for the rest of the code, it assumes the
216	 * loader kernel and the booted one are exactly identical
217	 */
218	lis	r11,swsusp_save_area@h
219	ori	r11,r11,swsusp_save_area@l
220	tophys(r11,r11)
221
222	/* Restore the BATs, and SDR1.  Then we can turn on the MMU.
223	 * This is a bit hairy as we are running out of those BATs,
224	 * but first, our code is probably in the icache, and we are
225	 * writing the same value to the BAT, so that should be fine,
226	 * though a better solution will have to be found long-term
227	 */
228	lwz	r4,SL_SDR1(r11)
229	mtsdr1	r4
230	lwz	r4,SL_SPRG0(r11)
231	mtsprg	0,r4
232	lwz	r4,SL_SPRG0+4(r11)
233	mtsprg	1,r4
234	lwz	r4,SL_SPRG0+8(r11)
235	mtsprg	2,r4
236	lwz	r4,SL_SPRG0+12(r11)
237	mtsprg	3,r4
238
239
240BEGIN_MMU_FTR_SECTION
241	li	r4,0
242	mtspr	SPRN_DBAT4U,r4
243	mtspr	SPRN_DBAT4L,r4
244	mtspr	SPRN_DBAT5U,r4
245	mtspr	SPRN_DBAT5L,r4
246	mtspr	SPRN_DBAT6U,r4
247	mtspr	SPRN_DBAT6L,r4
248	mtspr	SPRN_DBAT7U,r4
249	mtspr	SPRN_DBAT7L,r4
250	mtspr	SPRN_IBAT4U,r4
251	mtspr	SPRN_IBAT4L,r4
252	mtspr	SPRN_IBAT5U,r4
253	mtspr	SPRN_IBAT5L,r4
254	mtspr	SPRN_IBAT6U,r4
255	mtspr	SPRN_IBAT6L,r4
256	mtspr	SPRN_IBAT7U,r4
257	mtspr	SPRN_IBAT7L,r4
258END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
259
260	/* Flush all TLBs */
261	lis	r4,0x1000
2621:	addic.	r4,r4,-0x1000
263	tlbie	r4
264	bgt	1b
265	sync
266
267	/* restore the MSR and turn on the MMU */
268	lwz	r3,SL_MSR(r11)
269	bl	turn_on_mmu
270	tovirt(r11,r11)
271
272	/* Restore TB */
273	li	r3,0
274	mttbl	r3
275	lwz	r3,SL_TB(r11)
276	lwz	r4,SL_TB+4(r11)
277	mttbu	r3
278	mttbl	r4
279
280	/* Kick decrementer */
281	li	r0,1
282	mtdec	r0
283
284	/* Restore the callee-saved registers and return */
285	lwz	r0,SL_CR(r11)
286	mtcr	r0
287	lwz	r2,SL_R2(r11)
288	lmw	r12,SL_R12(r11)
289	lwz	r1,SL_SP(r11)
290	lwz	r0,SL_LR(r11)
291	mtlr	r0
292
293
294	li	r3,0
295	blr
296
297turn_on_mmu:
298	mflr	r4
299	mtsrr0	r4
300	mtsrr1	r3
301	sync
302	isync
303	rfi
304