/*
 *  $Id: hashtable.S,v 1.1.1.1 2007/08/03 18:52:12 Exp $
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
	.globl mmu_hash_lock
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock
 */
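/*
 * In rough C terms (illustration only; test_and_set here is pseudocode,
 * not a real kernel primitive):
 *
 *	while (test_and_set(&mmu_hash_lock))
 *		;				spin until the lock is free
 *	mmu_hash_lock = 0;			then drop it again at once
 *
 * i.e. take the lock and release it immediately, which simply serializes
 * us against a CPU currently inside hash_page.
 */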
#ifdef CONFIG_SMP
	.text
_GLOBAL(hash_page_sync)
	lis	r8,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
	eieio
	li	r0,0
	stw	r0,0(r8)
	blr
#endif

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, ctr, lr.
 */
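/*
 * In outline (an illustrative sketch, not the real calling convention):
 *
 *	pte = *ptep;				found via the pgdir walk below
 *	if (access & ~pte)
 *		return to caller;		no mapping / not permitted
 *	pte |= _PAGE_ACCESSED | _PAGE_HASHPTE;	plus _PAGE_DIRTY on a write
 *	create_hpte(...);			put it in the hash table
 *	return from the exception;
 */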
	.text
_GLOBAL(hash_page)
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	beqlr-
#endif
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
	 */
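	/*
	 * In rough C terms (illustration only), the retry loop below is:
	 *
	 *	do {
	 *		old = *ptep;				lwarx
	 *		if (access & ~old)
	 *			fail;				not permitted
	 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
	 *			  | (write ? _PAGE_DIRTY : 0);
	 *	} while (!store_conditional(ptep, new));	stwcx.
	 */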
retry:
	lwarx	r6,0,r8			/* get linux-style pte */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)
	bl	create_hpte		/* add the hash table entry */

/*
 * htab_reloads counts the number of times we have to fault an
 * HPTE into the hash table.  This should only happen after a
 * fork (because fork does a flush_tlb_mm) or a vmalloc or ioremap.
 * Where a page is faulted into a process's address space,
 * update_mmu_cache gets called to put the HPTE into the hash table
 * and those are counted as preloads rather than reloads.
 */
	addis	r8,r7,htab_reloads@ha
	lwz	r3,htab_reloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_reloads@l(r8)

#ifdef CONFIG_SMP
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */

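	/*
	 * For illustration, with esid = (va >> 28) & 0xf this computes
	 *
	 *	vsid = context * (897 * 16) + esid * 0x111;
	 *
	 * of which create_hpte only uses the low 24 bits.
	 */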
#ifdef CONFIG_SMP
	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	tophys(r7,0)

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
	rlwimi	r8,r4,22,20,29
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

	addis	r8,r7,htab_preloads@ha
	lwz	r3,htab_preloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_preloads@l(r8)

9:
#ifdef CONFIG_SMP
	eieio
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r10
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
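/*
 * For example, Hash_bits = 12 means 2^12 = 4096 PTEGs of 64 bytes each,
 * i.e. a 256kB hash table, and Hash_msk = 4095 * 64 = 0x3ffc0.
 */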

/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
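/*
 * For illustration, the patched code below computes roughly (in C)
 *
 *	hash      = (vsid ^ (va >> 12)) & ((1 << Hash_bits) - 1);
 *	pteg_addr = Hash_base + (hash << LG_PTEG_SIZE);
 *
 * (plus the phys/virt offset in r7), and the secondary PTEG is reached
 * by flipping the hash bits, i.e. pteg_addr ^ Hash_msk
 * (see hash_page_patch_B/C below).
 */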

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
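	/*
	 * The resulting PP values (with the segment register keys Linux
	 * sets up) are, roughly:
	 *
	 *	PP = 0	kernel read/write, no user access
	 *	PP = 2	kernel and user read/write
	 *	PP = 3	kernel and user read-only
	 *
	 * A writable but not-yet-dirty user page gets PP = 3, so the first
	 * store faults and lets us set _PAGE_DIRTY.
	 */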

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-PTE_SIZE
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 */
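	/*
	 * next_slot just steps through offsets 0, 8, ..., 56 within the
	 * PTEG (advance by PTE_SIZE, wrap with the 7*PTE_SIZE mask), so
	 * successive evictions hit different slots.
	 */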
	addis	r4,r7,next_slot@ha
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,PTE_SIZE
	andi.	r6,r6,7*PTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6

	/* update counter of evicted pages */
	addis	r6,r7,htab_evicts@ha
	lwz	r3,htab_evicts@l(r6)
	addi	r3,r3,1
	stw	r3,htab_evicts@l(r6)

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,PTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
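/*
 * Hence the sequence below: store the first word with V clear, sync,
 * store the RPN/WIMG/PP word, sync, then re-store the first word with
 * V set, so another CPU can never see V = 1 alongside a stale second word.
 */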
found_empty:
found_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,PTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync		/* make sure pte updates get to memory */
	blr

	.section .bss
	.align	2
next_slot:
	.space	4
	.globl primary_pteg_full
primary_pteg_full:
	.space	4
	.globl htab_hash_searches
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entries for a range of pages from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
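/*
 * In outline (an illustrative sketch, not real C):
 *
 *	for ( ; count > 0; count--, va += 0x1000, ptep++) {
 *		if (*ptep & _PAGE_HASHPTE) {
 *			clear _PAGE_HASHPTE atomically;
 *			find the matching HPTE (primary, then secondary
 *			PTEG) and invalidate it;
 *			tlbie(va);
 *		}
 *	}
 */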
_GLOBAL(flush_hash_pages)
	tophys(r7,0)

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
	rlwimi	r5,r4,22,20,29
1:	lwz	r0,0(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,4
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	rlwinm	r8,r1,0,0,18
	add	r8,r8,r7
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
33:	lwarx	r8,0,r5			/* fetch the pte */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	/* Get the address of the primary PTE group in the hash table (r8) */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-PTE_SIZE
1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,PTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,4			/* advance to next pte */
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr
